summaryrefslogtreecommitdiffstats
path: root/js/src/jit/riscv64
diff options
context:
space:
mode:
Diffstat (limited to 'js/src/jit/riscv64')
-rw-r--r--js/src/jit/riscv64/Architecture-riscv64.cpp100
-rw-r--r--js/src/jit/riscv64/Architecture-riscv64.h513
-rw-r--r--js/src/jit/riscv64/Assembler-riscv64.cpp1548
-rw-r--r--js/src/jit/riscv64/Assembler-riscv64.h685
-rw-r--r--js/src/jit/riscv64/AssemblerMatInt.cpp217
-rw-r--r--js/src/jit/riscv64/CodeGenerator-riscv64.cpp2871
-rw-r--r--js/src/jit/riscv64/CodeGenerator-riscv64.h210
-rw-r--r--js/src/jit/riscv64/LIR-riscv64.h399
-rw-r--r--js/src/jit/riscv64/Lowering-riscv64.cpp1087
-rw-r--r--js/src/jit/riscv64/Lowering-riscv64.h110
-rw-r--r--js/src/jit/riscv64/MacroAssembler-riscv64-inl.h2025
-rw-r--r--js/src/jit/riscv64/MacroAssembler-riscv64.cpp6515
-rw-r--r--js/src/jit/riscv64/MacroAssembler-riscv64.h1224
-rw-r--r--js/src/jit/riscv64/MoveEmitter-riscv64.cpp333
-rw-r--r--js/src/jit/riscv64/MoveEmitter-riscv64.h70
-rw-r--r--js/src/jit/riscv64/Register-riscv64.h186
-rw-r--r--js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h80
-rw-r--r--js/src/jit/riscv64/SharedICHelpers-riscv64.h77
-rw-r--r--js/src/jit/riscv64/SharedICRegisters-riscv64.h38
-rw-r--r--js/src/jit/riscv64/Simulator-riscv64.cpp4718
-rw-r--r--js/src/jit/riscv64/Simulator-riscv64.h1281
-rw-r--r--js/src/jit/riscv64/Trampoline-riscv64.cpp856
-rw-r--r--js/src/jit/riscv64/constant/Base-constant-riscv.cpp247
-rw-r--r--js/src/jit/riscv64/constant/Base-constant-riscv.h1057
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-a.h43
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-c.h61
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-d.h55
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-f.h51
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-i.h73
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-m.h34
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-v.h508
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-zicsr.h30
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv-zifencei.h15
-rw-r--r--js/src/jit/riscv64/constant/Constant-riscv64.h68
-rw-r--r--js/src/jit/riscv64/constant/util-riscv64.h82
-rw-r--r--js/src/jit/riscv64/disasm/Disasm-riscv64.cpp2155
-rw-r--r--js/src/jit/riscv64/disasm/Disasm-riscv64.h74
-rw-r--r--js/src/jit/riscv64/extension/base-assembler-riscv.cc517
-rw-r--r--js/src/jit/riscv64/extension/base-assembler-riscv.h219
-rw-r--r--js/src/jit/riscv64/extension/base-riscv-i.cc351
-rw-r--r--js/src/jit/riscv64/extension/base-riscv-i.h273
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-a.cc123
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-a.h46
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-c.cc275
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-c.h77
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-d.cc167
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-d.h68
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-f.cc158
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-f.h66
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-m.cc68
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-m.h37
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-v.cc891
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-v.h484
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zicsr.cc44
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zicsr.h57
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zifencei.cc17
-rw-r--r--js/src/jit/riscv64/extension/extension-riscv-zifencei.h20
57 files changed, 33654 insertions, 0 deletions
diff --git a/js/src/jit/riscv64/Architecture-riscv64.cpp b/js/src/jit/riscv64/Architecture-riscv64.cpp
new file mode 100644
index 0000000000..ea4a364b92
--- /dev/null
+++ b/js/src/jit/riscv64/Architecture-riscv64.cpp
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/Architecture-riscv64.h"
+
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/RegisterSets.h"
+#include "jit/Simulator.h"
+namespace js {
+namespace jit {
+Registers::Code Registers::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+ // Even for single size registers save complete double register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ } else {
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+FloatRegister FloatRegister::singleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ == Codes::Double) {
+ return FloatRegister(encoding_, Codes::Single);
+ }
+ return *this;
+}
+
+FloatRegister FloatRegister::doubleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ != Codes::Double) {
+ return FloatRegister(encoding_, Codes::Double);
+ }
+ return *this;
+}
+
+uint32_t FloatRegister::GetPushSizeInBytes(
+ const TypedRegisterSet<FloatRegister>& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return s.size() * sizeof(double);
+}
// Invalidate the instruction cache over [code, code + size) so freshly
// written JIT code is not executed from stale cache lines.
void FlushICache(void* code, size_t size) {
#if defined(JS_SIMULATOR)
  // Under the simulator, flush the simulated icache instead of the host's.
  js::jit::SimulatorProcess::FlushICache(code, size);

#elif defined(__linux__)
# if defined(__GNUC__)
  intptr_t end = reinterpret_cast<intptr_t>(code) + size;
  __builtin___clear_cache(reinterpret_cast<char*>(code),
                          reinterpret_cast<char*>(end));

# else
  // Non-GCC Linux toolchain: fall back to the platform cache-flush call.
  _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
# endif
#else
# error "Unsupported platform"
#endif
}
+
// TODO: add real CPU-flag detection. Until then there is nothing to compute,
// so the flags are trivially considered computed.
bool CPUFlagsHaveBeenComputed() { return true; }
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/Architecture-riscv64.h b/js/src/jit/riscv64/Architecture-riscv64.h
new file mode 100644
index 0000000000..e53273f2e2
--- /dev/null
+++ b/js/src/jit/riscv64/Architecture-riscv64.h
@@ -0,0 +1,513 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_Architecture_riscv64_h
+#define jit_riscv64_Architecture_riscv64_h
+
+// JitSpewer.h is included through MacroAssembler implementations for other
+// platforms, so include it here to avoid inadvertent build bustage.
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "jit/JitSpewer.h"
+#include "jit/shared/Architecture-shared.h"
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
static const uint32_t SimdMemoryAlignment =
    16;  // 16-byte SIMD memory alignment (an earlier draft used 4 merely to
         // silence div-by-zero warnings; 16 is the real value).
+
+// RISCV64 has 32 64-bit integer registers, x0 though x31.
+// The program counter is not accessible as a register.
+
+// RISCV INT Register Convention:
+// Name Alias Usage
+// x0 zero hardwired to 0, ignores writes
+// x1 ra return address for calls
+// x2 sp stack pointer
+// x3 gp global pointer
+// x4 tp thread pointer
// x5-x7 t0-t2 temporary registers 0-2
// x8 fp/s0 callee-saved register 0 / frame pointer
// x9 s1 callee-saved register 1
// x10-x11 a0-a1 function arguments 0-1 / return values
// x12-x17 a2-a7 function arguments 2-7
// x18-x27 s2-s11 callee-saved registers 2-11
// x28-x31 t3-t6 temporary registers 3-6
+
+// RISCV-64 FP Register Convention:
+// Name Alias Usage
+// $f0-$f7 $ft0-$ft7 Temporary registers
+// $f8-$f9 $fs0-$fs1 Callee-saved registers
+// $f10-$f11 $fa0-$fa1 Return values
+// $f12-$f17 $fa2-$fa7 Args values
+// $f18-$f27 $fs2-$fs11 Callee-saved registers
+// $f28-$f31 $ft8-$ft11 Temporary registers
// General-purpose integer register set for riscv64: hardware numbering
// x0-x31 plus the RISC-V ABI aliases (see the convention table above).
class Registers {
 public:
  enum RegisterID {
    x0 = 0,
    x1,
    x2,
    x3,
    x4,
    x5,
    x6,
    x7,
    x8,
    x9,
    x10,
    x11,
    x12,
    x13,
    x14,
    x15,
    x16,
    x17,
    x18,
    x19,
    x20,
    x21,
    x22,
    x23,
    x24,
    x25,
    x26,
    x27,
    x28,
    x29,
    x30,
    x31,
    // ABI aliases for the registers above.
    zero = x0,
    ra = x1,
    sp = x2,
    gp = x3,
    tp = x4,
    t0 = x5,
    t1 = x6,
    t2 = x7,
    fp = x8,
    s1 = x9,
    a0 = x10,
    a1 = x11,
    a2 = x12,
    a3 = x13,
    a4 = x14,
    a5 = x15,
    a6 = x16,
    a7 = x17,
    s2 = x18,
    s3 = x19,
    s4 = x20,
    s5 = x21,
    s6 = x22,
    s7 = x23,
    s8 = x24,
    s9 = x25,
    s10 = x26,
    s11 = x27,
    t3 = x28,
    t4 = x29,
    t5 = x30,
    t6 = x31,
    invalid_reg,
  };
  typedef uint8_t Code;
  typedef RegisterID Encoding;
  // Memory image of a spilled general register: one machine word.
  union RegisterContent {
    uintptr_t r;
  };

  // One bit per register; bit i corresponds to xi.
  typedef uint32_t SetType;

  static uint32_t SetSize(SetType x) {
    static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
    return mozilla::CountPopulation32(x);
  }
  static uint32_t FirstBit(SetType x) {
    return mozilla::CountTrailingZeroes32(x);
  }
  static uint32_t LastBit(SetType x) {
    return 31 - mozilla::CountLeadingZeroes32(x);
  }
  // Map a register code to its ABI alias name; "invalid" if out of range.
  static const char* GetName(uint32_t code) {
    static const char* const Names[] = {
        "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "s1", "a0",
        "a1", "a2", "a3", "a4", "a5", "a6", "a7", "s2", "s3", "s4", "s5",
        "s6", "s7", "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"};
    static_assert(Total == std::size(Names), "Table is the correct size");
    if (code >= Total) {
      return "invalid";
    }
    return Names[code];
  }

  static Code FromName(const char*);

  static const Encoding StackPointer = sp;
  static const Encoding Invalid = invalid_reg;
  static const uint32_t Total = 32;
  static const uint32_t TotalPhys = 32;
  // NOTE(review): NonAllocatableMask below reserves 10 registers, which
  // leaves 22 allocatable — confirm whether 24 here is intentional or stale.
  static const uint32_t Allocatable = 24;
  static const SetType NoneMask = 0x0;
  static const SetType AllMask = 0xFFFFFFFF;
  // a0-a7: integer argument (and return-value) registers.
  static const SetType ArgRegMask =
      (1 << Registers::a0) | (1 << Registers::a1) | (1 << Registers::a2) |
      (1 << Registers::a3) | (1 << Registers::a4) | (1 << Registers::a5) |
      (1 << Registers::a6) | (1 << Registers::a7);

  // Caller-saved registers: the arguments plus the temporaries t0-t6.
  static const SetType VolatileMask =
      ArgRegMask | (1 << Registers::t0) | (1 << Registers::t1) |
      (1 << Registers::t2) | (1 << Registers::t3) | (1 << Registers::t4) |
      (1 << Registers::t5) | (1 << Registers::t6);

  // We use this constant to save registers when entering functions. This
  // is why $ra is added here even though it is not "Non Volatile".
  static const SetType NonVolatileMask =
      (1 << Registers::ra) | (1 << Registers::fp) | (1 << Registers::s1) |
      (1 << Registers::s2) | (1 << Registers::s3) | (1 << Registers::s4) |
      (1 << Registers::s5) | (1 << Registers::s6) | (1 << Registers::s7) |
      (1 << Registers::s8) | (1 << Registers::s9) | (1 << Registers::s10) |
      (1 << Registers::s11);

  static const SetType NonAllocatableMask =
      (1 << Registers::zero) |  // Always be zero.
      (1 << Registers::t4) |    // Scratch reg
      (1 << Registers::t5) |    // Scratch reg
      (1 << Registers::t6) |    // Scratch reg or call reg
      (1 << Registers::s11) |   // Scratch reg
      (1 << Registers::ra) | (1 << Registers::tp) | (1 << Registers::sp) |
      (1 << Registers::fp) | (1 << Registers::gp);

  static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;

  // Registers returned from a JS -> JS call.
  static const SetType JSCallMask = (1 << Registers::a2);

  // Registers returned from a JS -> C call.
  static const SetType CallMask = (1 << Registers::a0);

  static const SetType WrapperMask = VolatileMask;
};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
// Floating-point register set for riscv64: hardware numbering f0-f31 plus
// the RISC-V ABI aliases (ft* temporaries, fs* callee-saved, fa* arguments).
class FloatRegisters {
 public:
  enum FPRegisterID {
    f0 = 0,
    f1,
    f2,
    f3,
    f4,
    f5,
    f6,
    f7,
    f8,
    f9,
    f10,
    f11,
    f12,
    f13,
    f14,
    f15,
    f16,
    f17,
    f18,
    f19,
    f20,
    f21,
    f22,
    f23,
    f24,
    f25,
    f26,
    f27,
    f28,
    f29,
    f30,
    f31,
    invalid_reg,
    // ABI aliases for the registers above.
    ft0 = f0,
    ft1 = f1,
    ft2 = f2,
    ft3 = f3,
    ft4 = f4,
    ft5 = f5,
    ft6 = f6,
    ft7 = f7,
    fs0 = f8,
    fs1 = f9,
    fa0 = f10,
    fa1 = f11,
    fa2 = f12,
    fa3 = f13,
    fa4 = f14,
    fa5 = f15,
    fa6 = f16,
    fa7 = f17,
    fs2 = f18,
    fs3 = f19,
    fs4 = f20,
    fs5 = f21,
    fs6 = f22,
    fs7 = f23,
    fs8 = f24,
    fs9 = f25,
    fs10 = f26,
    fs11 = f27,  // Scratch register
    ft8 = f28,
    ft9 = f29,
    ft10 = f30,  // Scratch register
    ft11 = f31
  };

  // NOTE(review): NumTypes sits between Double and Single, so NumTypes == 1
  // and kind-indexed loops enumerate only the Double kind. That looks
  // deliberate given the fully overlapping single/double masks below (singles
  // are NaN-boxed into doubles), but confirm before reordering.
  enum Kind : uint8_t { Double, NumTypes, Single };

  typedef FPRegisterID Code;
  typedef FPRegisterID Encoding;
  // Memory image of a spilled FP register.
  union RegisterContent {
    float s;
    double d;
  };

  // Map a register code to its ABI alias name; "invalid" if out of range.
  // (Fixed: the entry for f9 was "fs2", duplicating f18's name, which made
  // FromName("fs2") resolve to f9 and left "fs1" unreachable.)
  static const char* GetName(uint32_t code) {
    static const char* const Names[] = {
        "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",
        "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",
        "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",
        "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"};
    static_assert(TotalPhys == std::size(Names), "Table is the correct size");
    if (code >= Total) {
      return "invalid";
    }
    return Names[code];
  }

  static Code FromName(const char* name);

  // One bit per register; bit i corresponds to fi.
  typedef uint32_t SetType;

  static const Code Invalid = invalid_reg;
  static const uint32_t Total = 32;
  static const uint32_t TotalPhys = 32;
  // NOTE(review): NonAllocatableMask below reserves only 2 registers, which
  // would leave 30 allocatable — confirm whether 23 here is stale.
  static const uint32_t Allocatable = 23;
  static const SetType AllPhysMask = 0xFFFFFFFF;
  static const SetType AllMask = 0xFFFFFFFF;
  static const SetType AllDoubleMask = AllMask;
  // Single values are stored as 64 bits values (NaN-boxed) when pushing them to
  // the stack, we do not require making distinctions between the 2 types, and
  // therefore the masks are overlapping. See The RISC-V Instruction Set Manual
  // for 14.2 NaN Boxing of Narrower Values.
  static const SetType AllSingleMask = AllMask;
  // fs0-fs11: callee-saved per the RISC-V FP calling convention.
  static const SetType NonVolatileMask =
      SetType((1 << FloatRegisters::fs0) | (1 << FloatRegisters::fs1) |
              (1 << FloatRegisters::fs2) | (1 << FloatRegisters::fs3) |
              (1 << FloatRegisters::fs4) | (1 << FloatRegisters::fs5) |
              (1 << FloatRegisters::fs6) | (1 << FloatRegisters::fs7) |
              (1 << FloatRegisters::fs8) | (1 << FloatRegisters::fs9) |
              (1 << FloatRegisters::fs10) | (1 << FloatRegisters::fs11));
  static const SetType VolatileMask = AllMask & ~NonVolatileMask;

  // fs11/ft10 is the scratch register.
  static const SetType NonAllocatableMask =
      SetType((1 << FloatRegisters::fs11) | (1 << FloatRegisters::ft10));

  static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
};
+
+template <typename T>
+class TypedRegisterSet;
+
+struct FloatRegister {
+ public:
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ x &= FloatRegisters::AllPhysMask;
+ return mozilla::CountPopulation32(x);
+ }
+
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType");
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType");
+ return 31 - mozilla::CountLeadingZeroes64(x);
+ }
+
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 0x1f;
+ return FloatRegister(Code(code));
+ }
+ bool isSimd128() const { return false; }
+ bool isInvalid() const { return invalid_; }
+ FloatRegister asSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Single);
+ }
+ FloatRegister asDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Double);
+ }
+ FloatRegister asSimd128() const { MOZ_CRASH(); }
+ constexpr Code code() const {
+ MOZ_ASSERT(!invalid_);
+ return encoding_;
+ }
+ Encoding encoding() const { return encoding_; }
+ const char* name() const { return FloatRegisters::GetName(code()); }
+ bool volatile_() const {
+ MOZ_ASSERT(!invalid_);
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ bool operator!=(FloatRegister other) const { return code() != other.code(); }
+ bool operator==(FloatRegister other) const { return code() == other.code(); }
+ bool aliases(FloatRegister other) const {
+ return other.encoding_ == encoding_;
+ }
+ uint32_t numAliased() const { return 1; }
+ FloatRegister aliased(uint32_t aliasIdx) const {
+ MOZ_ASSERT(aliasIdx == 0);
+ return *this;
+ }
+ // Ensure that two floating point registers' types are equivalent.
+ bool equiv(FloatRegister other) const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == other.kind_;
+ }
+ constexpr uint32_t size() const {
+ MOZ_ASSERT(!invalid_);
+ if (kind_ == FloatRegisters::Double) {
+ return sizeof(double);
+ }
+ MOZ_ASSERT(kind_ == FloatRegisters::Single);
+ return sizeof(float);
+ }
+ uint32_t numAlignedAliased() { return numAliased(); }
+ FloatRegister alignedAliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(aliasIdx < numAliased());
+ return aliased(aliasIdx);
+ }
+ SetType alignedOrDominatedAliasedSet() const { return SetType(1) << code(); }
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName Name = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+ static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+ printf("AllocatableAsIndexableSet\n");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ FloatRegister singleOverlay() const;
+ FloatRegister doubleOverlay() const;
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+
+ uint32_t getRegisterDumpOffsetInBytes() {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return code() * sizeof(double);
+ }
+ static Code FromName(const char* name);
+
+ // This is used in static initializers, so produce a bogus value instead of
+ // crashing.
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+
+ private:
+ typedef Codes::Kind Kind;
+ // These fields only hold valid values: an invalid register is always
+ // represented as a valid encoding and kind with the invalid_ bit set.
+ Encoding encoding_; // 32 encodings
+ Kind kind_; // Double, Single; more later
+ bool invalid_;
+
+ public:
+ constexpr FloatRegister(Encoding encoding, Kind kind)
+ : encoding_(encoding), kind_(kind), invalid_(false) {
+ MOZ_ASSERT(uint32_t(encoding) < Codes::Total);
+ }
+
+ constexpr FloatRegister(Encoding encoding)
+ : encoding_(encoding), kind_(FloatRegisters::Double), invalid_(false) {
+ MOZ_ASSERT(uint32_t(encoding) < Codes::Total);
+ }
+
+ constexpr FloatRegister()
+ : encoding_(FloatRegisters::invalid_reg),
+ kind_(FloatRegisters::Double),
+ invalid_(true) {}
+
+ bool isSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Single;
+ }
+ bool isDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Double;
+ }
+
+ Encoding code() { return encoding_; }
+};
+
// Restrict a live set to registers indexable as Float32. On riscv64 the
// single mask equals the full mask (singles are NaN-boxed into doubles).
template <>
inline FloatRegister::SetType
FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
  return set & FloatRegisters::AllSingleMask;
}

// Restrict a live set to registers indexable as Float64.
template <>
inline FloatRegister::SetType
FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
  return set & FloatRegisters::AllDoubleMask;
}

// Any type: no restriction.
template <>
inline FloatRegister::SetType
FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
  return set;
}
+
// On riscv64 single/double views fully alias one register file: there are no
// unaliased doubles and no one-to-many aliasing.
inline bool hasUnaliasedDouble() { return false; }
inline bool hasMultiAlias() { return false; }

// No ABI-mandated shadow space is reserved below the stack pointer.
static const uint32_t ShadowStackSpace = 0;
// Jumps may target any 32-bit displacement.
static const uint32_t JumpImmediateRange = INT32_MAX;

#ifdef JS_NUNBOX32
static const int32_t NUNBOX32_TYPE_OFFSET = 4;
static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
#endif

// A spill slot must be able to hold either register class's content.
static const uint32_t SpillSlotSize =
    std::max(sizeof(Registers::RegisterContent),
             sizeof(FloatRegisters::RegisterContent));

// TODO: no CPU feature flags are detected yet.
inline uint32_t GetRISCV64Flags() { return 0; }
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_Architecture_riscv64_h */
diff --git a/js/src/jit/riscv64/Assembler-riscv64.cpp b/js/src/jit/riscv64/Assembler-riscv64.cpp
new file mode 100644
index 0000000000..d9e748bfb9
--- /dev/null
+++ b/js/src/jit/riscv64/Assembler-riscv64.cpp
@@ -0,0 +1,1548 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+#include "jit/riscv64/Assembler-riscv64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Marking.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/riscv64/disasm/Disasm-riscv64.h"
+#include "vm/Realm.h"
+
+using mozilla::DebugOnly;
+namespace js {
+namespace jit {
+
+#define UNIMPLEMENTED_RISCV() MOZ_CRASH("RISC_V not implemented");
+
+bool Assembler::FLAG_riscv_debug = false;
+
+void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); }
+
// Size of the instruction stream, in bytes.
size_t Assembler::size() const { return m_buffer.size(); }
+
// Move the assembled code into `bytes` (which must be empty on entry).
// Returns false on OOM.
bool Assembler::swapBuffer(wasm::Bytes& bytes) {
  // For now, specialize to the one use case. As long as wasm::Bytes is a
  // Vector, not a linked-list of chunks, there's not much we can do other
  // than copy.
  MOZ_ASSERT(bytes.empty());
  if (!bytes.resize(bytesNeeded())) {
    return false;
  }
  m_buffer.executableCopy(bytes.begin());
  return true;
}
+
// Size of the jump relocation table, in bytes.
size_t Assembler::jumpRelocationTableBytes() const {
  return jumpRelocations_.length();
}
+
// Size of the data relocation table, in bytes.
size_t Assembler::dataRelocationTableBytes() const {
  return dataRelocations_.length();
}
// Total bytes needed: the instruction stream plus both relocation tables.
size_t Assembler::bytesNeeded() const {
  return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
}
+
// Copy the finished instruction stream into the executable buffer.
// Only valid once assembly has been finished.
void Assembler::executableCopy(uint8_t* buffer) {
  MOZ_ASSERT(isFinished);
  m_buffer.executableCopy(buffer);
}
+
+uint32_t Assembler::AsmPoolMaxOffset = 1024;
+
+uint32_t Assembler::GetPoolMaxOffset() {
+ static bool isSet = false;
+ if (!isSet) {
+ char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
+ uint32_t poolMaxOffset;
+ if (poolMaxOffsetStr &&
+ sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1) {
+ AsmPoolMaxOffset = poolMaxOffset;
+ }
+ isSet = true;
+ }
+ return AsmPoolMaxOffset;
+}
+
+// Pool callbacks stuff:
+void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
+ MOZ_CRASH("Unimplement");
+}
+
+void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
+ MOZ_CRASH("Unimplement");
+}
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
// Overwrite `dest` with a JAL that jumps from `branch` to just past the
// constant pool (`afterPool`), guarding execution from falling into data.
void Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest,
                               BufferOffset afterPool) {
  DEBUG_PRINTF("\tWritePoolGuard\n");
  // JAL reach is a signed 21-bit, 2-byte-aligned displacement.
  int32_t off = afterPool.getOffset() - branch.getOffset();
  if (!is_int21(off) || !((off & 0x1) == 0)) {
    printf("%d\n", off);
    MOZ_CRASH("imm invalid");
  }
  // JAL encode is
  // 31 | 30 21 | 20 | 19 12 | 11 7 | 6 0 |
  // imm[20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode|
  // 1 10 1 8 5 7
  // offset[20:1] dest JAL
  // Scatter the offset bits into the J-type immediate layout.
  int32_t imm20 = (off & 0xff000) |  // bits 19-12
                  ((off & 0x800) << 9) |  // bit 11
                  ((off & 0x7fe) << 20) |  // bits 10-1
                  ((off & 0x100000) << 11);  // bit 20
  // rd field is left 0 (x0): a plain jump whose link result is discarded.
  Instr instr = JAL | (imm20 & kImm20Mask);
  dest->SetInstructionBits(instr);
  DEBUG_PRINTF("%p(%x): ", dest, branch.getOffset());
  disassembleInstr(dest->InstructionBits(), JitSpew_Codegen);
}
+
// Write a PoolHeader marker at `start` recording the pool's size in
// instructions and whether the pool was placed naturally.
void Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural) {
  static_assert(sizeof(PoolHeader) == 4);

  // Get the total size of the pool.
  const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
  const uintptr_t totalPoolInstructions = totalPoolSize / kInstrSize;

  // The size must be instruction-aligned and fit the header's 15-bit field.
  MOZ_ASSERT((totalPoolSize & 0x3) == 0);
  MOZ_ASSERT(totalPoolInstructions < (1 << 15));

  PoolHeader header(totalPoolInstructions, isNatural);
  *(PoolHeader*)start = header;
}
+
+void Assembler::copyJumpRelocationTable(uint8_t* dest) {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+}
+
+void Assembler::copyDataRelocationTable(uint8_t* dest) {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+}
+
+void Assembler::RV_li(Register rd, int64_t imm) {
+ UseScratchRegisterScope temps(this);
+ if (RecursiveLiCount(imm) > GeneralLiCount(imm, temps.hasAvailable())) {
+ GeneralLi(rd, imm);
+ } else {
+ RecursiveLi(rd, imm);
+ }
+}
+
+int Assembler::RV_li_count(int64_t imm, bool is_get_temp_reg) {
+ if (RecursiveLiCount(imm) > GeneralLiCount(imm, is_get_temp_reg)) {
+ return GeneralLiCount(imm, is_get_temp_reg);
+ } else {
+ return RecursiveLiCount(imm);
+ }
+}
+
// Materialize a 64-bit immediate into `rd` using the "general" strategy
// (lui/addi/ori plus shifts), optionally borrowing one scratch register.
void Assembler::GeneralLi(Register rd, int64_t imm) {
  // 64-bit imm is put in the register rd.
  // In most cases the imm is 32 bit and 2 instructions are generated. If a
  // temporary register is available, in the worst case, 6 instructions are
  // generated for a full 64-bit immediate. If temporary register is not
  // available the maximum will be 8 instructions. If imm is more than 32 bits
  // and a temp register is available, imm is divided into two 32-bit parts,
  // low_32 and up_32. Each part is built in a separate register. low_32 is
  // built before up_32. If low_32 is negative (upper 32 bits are 1), 0xffffffff
  // is subtracted from up_32 before up_32 is built. This compensates for 32
  // bits of 1's in the lower when the two registers are added. If no temp is
  // available, the upper 32 bit is built in rd, and the lower 32 bits are
  // divided to 3 parts (11, 11, and 10 bits). The parts are shifted and added
  // to the upper part built in rd.
  if (is_int32(imm + 0x800)) {
    // 32-bit case. Maximum of 2 instructions generated
    int64_t high_20 = ((imm + 0x800) >> 12);
    // Sign-extend the low 12 bits (addi sign-extends its immediate).
    int64_t low_12 = imm << 52 >> 52;
    if (high_20) {
      lui(rd, (int32_t)high_20);
      if (low_12) {
        addi(rd, rd, low_12);
      }
    } else {
      addi(rd, zero_reg, low_12);
    }
    return;
  } else {
    UseScratchRegisterScope temps(this);
    // 64-bit case: divide imm into two 32-bit parts, upper and lower
    int64_t up_32 = imm >> 32;
    int64_t low_32 = imm & 0xffffffffull;
    Register temp_reg = rd;
    // Check if a temporary register is available
    if (up_32 == 0 || low_32 == 0) {
      // No temp register is needed
    } else {
      BlockTrampolinePoolScope block_trampoline_pool(this, 0);
      temp_reg = temps.hasAvailable() ? temps.Acquire() : InvalidReg;
    }
    if (temp_reg != InvalidReg) {
      // keep track of hardware behavior for lower part in sim_low
      int64_t sim_low = 0;
      // Build lower part
      if (low_32 != 0) {
        int64_t high_20 = ((low_32 + 0x800) >> 12);
        int64_t low_12 = low_32 & 0xfff;
        if (high_20) {
          // Adjust to 20 bits for the case of overflow
          high_20 &= 0xfffff;
          sim_low = ((high_20 << 12) << 32) >> 32;
          lui(rd, (int32_t)high_20);
          if (low_12) {
            sim_low += (low_12 << 52 >> 52) | low_12;
            addi(rd, rd, low_12);
          }
        } else {
          sim_low = low_12;
          ori(rd, zero_reg, low_12);
        }
      }
      if (sim_low & 0x100000000) {
        // Bit 31 is 1. Either an overflow or a negative 64 bit
        if (up_32 == 0) {
          // Positive number, but overflow because of the add 0x800
          slli(rd, rd, 32);
          srli(rd, rd, 32);
          return;
        }
        // low_32 is a negative 64 bit after the build
        up_32 = (up_32 - 0xffffffff) & 0xffffffff;
      }
      if (up_32 == 0) {
        return;
      }
      // Build upper part in a temporary register
      if (low_32 == 0) {
        // Build upper part in rd
        temp_reg = rd;
      }
      int64_t high_20 = (up_32 + 0x800) >> 12;
      int64_t low_12 = up_32 & 0xfff;
      if (high_20) {
        // Adjust to 20 bits for the case of overflow
        high_20 &= 0xfffff;
        lui(temp_reg, (int32_t)high_20);
        if (low_12) {
          addi(temp_reg, temp_reg, low_12);
        }
      } else {
        ori(temp_reg, zero_reg, low_12);
      }
      // Put it at the beginning of register
      slli(temp_reg, temp_reg, 32);
      if (low_32 != 0) {
        add(rd, rd, temp_reg);
      }
      return;
    }
    // No temp register. Build imm in rd.
    // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
    // parts to the upper part by doing shift and add.
    // First build upper part in rd.
    int64_t high_20 = (up_32 + 0x800) >> 12;
    int64_t low_12 = up_32 & 0xfff;
    if (high_20) {
      // Adjust to 20 bits for the case of overflow
      high_20 &= 0xfffff;
      lui(rd, (int32_t)high_20);
      if (low_12) {
        addi(rd, rd, low_12);
      }
    } else {
      ori(rd, zero_reg, low_12);
    }
    // upper part already in rd. Each part to be added to rd, has maximum of 11
    // bits, and always starts with a 1. rd is shifted by the size of the part
    // plus the number of zeros between the parts. Each part is added after the
    // left shift.
    uint32_t mask = 0x80000000;
    int32_t shift_val = 0;
    int32_t i;
    for (i = 0; i < 32; i++) {
      if ((low_32 & mask) == 0) {
        mask >>= 1;
        shift_val++;
        if (i == 31) {
          // rest is zero
          slli(rd, rd, shift_val);
        }
        continue;
      }
      // The first 1 seen
      int32_t part;
      if ((i + 11) < 32) {
        // Pick 11 bits
        part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11));
        slli(rd, rd, shift_val + 11);
        ori(rd, rd, part);
        i += 10;
        mask >>= 11;
      } else {
        part = (uint32_t)(low_32 << i) >> i;
        slli(rd, rd, shift_val + (32 - i));
        ori(rd, rd, part);
        break;
      }
      shift_val = 0;
    }
  }
}
+
+// Returns the number of instructions Assembler::RV_li would emit to
+// materialize |imm|, without emitting anything. |is_get_temp_reg| mirrors
+// whether RV_li would have a scratch register available. The control flow
+// below intentionally tracks RV_li branch for branch, so any change to
+// RV_li must be reflected here to keep the counts in sync.
+int Assembler::GeneralLiCount(int64_t imm, bool is_get_temp_reg) {
+  int count = 0;
+  // imitate Assembler::RV_li
+  if (is_int32(imm + 0x800)) {
+    // 32-bit case. Maximum of 2 instructions generated
+    int64_t high_20 = ((imm + 0x800) >> 12);
+    int64_t low_12 = imm << 52 >> 52;
+    if (high_20) {
+      // lui, plus addi if there is a low part.
+      count++;
+      if (low_12) {
+        count++;
+      }
+    } else {
+      // Just an addi/ori from zero.
+      count++;
+    }
+    return count;
+  } else {
+    // 64-bit case: divide imm into two 32-bit parts, upper and lower
+    int64_t up_32 = imm >> 32;
+    int64_t low_32 = imm & 0xffffffffull;
+    // Check if a temporary register is available
+    if (is_get_temp_reg) {
+      // keep track of hardware behavior for lower part in sim_low
+      int64_t sim_low = 0;
+      // Build lower part
+      if (low_32 != 0) {
+        int64_t high_20 = ((low_32 + 0x800) >> 12);
+        int64_t low_12 = low_32 & 0xfff;
+        if (high_20) {
+          // Adjust to 20 bits for the case of overflow
+          high_20 &= 0xfffff;
+          sim_low = ((high_20 << 12) << 32) >> 32;
+          count++;
+          if (low_12) {
+            sim_low += (low_12 << 52 >> 52) | low_12;
+            count++;
+          }
+        } else {
+          sim_low = low_12;
+          count++;
+        }
+      }
+      if (sim_low & 0x100000000) {
+        // Bit 31 is 1. Either an overflow or a negative 64 bit
+        if (up_32 == 0) {
+          // Positive number, but overflow because of the add 0x800
+          count++;
+          count++;
+          return count;
+        }
+        // low_32 is a negative 64 bit after the build
+        up_32 = (up_32 - 0xffffffff) & 0xffffffff;
+      }
+      if (up_32 == 0) {
+        return count;
+      }
+      int64_t high_20 = (up_32 + 0x800) >> 12;
+      int64_t low_12 = up_32 & 0xfff;
+      if (high_20) {
+        // Adjust to 20 bits for the case of overflow
+        high_20 &= 0xfffff;
+        count++;
+        if (low_12) {
+          count++;
+        }
+      } else {
+        count++;
+      }
+      // Put it at the beginning of the register (slli by 32).
+      count++;
+      if (low_32 != 0) {
+        // Combine lower part back in (add).
+        count++;
+      }
+      return count;
+    }
+    // No temp register. Build imm in rd.
+    // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
+    // parts to the upper part by doing shift and add.
+    // First build upper part in rd.
+    int64_t high_20 = (up_32 + 0x800) >> 12;
+    int64_t low_12 = up_32 & 0xfff;
+    if (high_20) {
+      // Adjust to 20 bits for the case of overflow
+      high_20 &= 0xfffff;
+      count++;
+      if (low_12) {
+        count++;
+      }
+    } else {
+      count++;
+    }
+    // upper part already in rd. Each part to be added to rd, has maximum of 11
+    // bits, and always starts with a 1. rd is shifted by the size of the part
+    // plus the number of zeros between the parts. Each part is added after the
+    // left shift.
+    uint32_t mask = 0x80000000;
+    int32_t i;
+    for (i = 0; i < 32; i++) {
+      if ((low_32 & mask) == 0) {
+        mask >>= 1;
+        if (i == 31) {
+          // rest is zero
+          count++;
+        }
+        continue;
+      }
+      // The first 1 seen
+      if ((i + 11) < 32) {
+        // Pick 11 bits
+        count++;
+        count++;
+        i += 10;
+        mask >>= 11;
+      } else {
+        count++;
+        count++;
+        break;
+      }
+    }
+  }
+  return count;
+}
+
+// Materializes a 48-bit pointer |imm| into |rd| with a FIXED 6-instruction
+// sequence (lui/addi/slli/ori/slli/ori). The sequence length and shape are
+// relied upon by target_address_at()/set_target_value_at() for later
+// patching, hence the no-nops/no-pool bracketing around the emission.
+void Assembler::li_ptr(Register rd, int64_t imm) {
+  m_buffer.enterNoNops();
+  m_buffer.assertNoPoolAndNoNops();
+  // Initialize rd with an address
+  // Pointers are 48 bits
+  // 6 fixed instructions are generated
+  DEBUG_PRINTF("li_ptr(%d, %lx <%ld>)\n", ToNumber(rd), imm, imm);
+  MOZ_ASSERT((imm & 0xfff0000000000000ll) == 0);
+  int64_t a6 = imm & 0x3f;                     // bits 0:5. 6 bits
+  int64_t b11 = (imm >> 6) & 0x7ff;            // bits 6:11. 11 bits
+  int64_t high_31 = (imm >> 17) & 0x7fffffff;  // 31 bits
+  int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
+  int64_t low_12 = high_31 & 0xfff;            // 12 bits
+  lui(rd, (int32_t)high_20);
+  addi(rd, rd, low_12);  // 31 bits in rd.
+  slli(rd, rd, 11);      // Space for next 11 bits
+  ori(rd, rd, b11);      // 11 bits are put in. 42 bits in rd
+  slli(rd, rd, 6);       // Space for next 6 bits
+  ori(rd, rd, a6);       // 6 bits are put in. 48 bits in rd
+  m_buffer.leaveNoNops();
+}
+
+// Materializes an arbitrary 64-bit immediate |imm| into |rd| with a FIXED
+// 8-instruction sequence: lui/addiw then three slli/addi pairs, each pair
+// shifting in the next 12 bits. The rounding constants (1LL << k) pre-bias
+// each chunk so the following sign-extending addi lands on the right value.
+// UpdateLoad64Value() patches this exact shape in place, so do not change
+// the instruction count or order.
+void Assembler::li_constant(Register rd, int64_t imm) {
+  m_buffer.enterNoNops();
+  m_buffer.assertNoPoolAndNoNops();
+  DEBUG_PRINTF("li_constant(%d, %lx <%ld>)\n", ToNumber(rd), imm, imm);
+  lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>
+              48);  // Bits 63:48
+  addiw(rd, rd,
+        (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>
+            52);  // Bits 47:36
+  slli(rd, rd, 12);
+  addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52);  // Bits 35:24
+  slli(rd, rd, 12);
+  addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52);  // Bits 23:12
+  slli(rd, rd, 12);
+  addi(rd, rd, imm << 52 >> 52);  // Bits 11:0
+  m_buffer.leaveNoNops();
+}
+
+// Assigns the ABI location for the next argument of |type|: integer-like
+// arguments go to successive registers starting at a0, floating-point
+// arguments to registers starting at fa0, and once the respective register
+// file is exhausted the argument spills to the stack (8-byte slots).
+ABIArg ABIArgGenerator::next(MIRType type) {
+  switch (type) {
+    case MIRType::Int32:
+    case MIRType::Int64:
+    case MIRType::Pointer:
+    case MIRType::RefOrNull:
+    case MIRType::StackResults: {
+      if (intRegIndex_ == NumIntArgRegs) {
+        // Out of integer argument registers: allocate a stack slot.
+        current_ = ABIArg(stackOffset_);
+        stackOffset_ += sizeof(uintptr_t);
+        break;
+      }
+      current_ = ABIArg(Register::FromCode(intRegIndex_ + a0.encoding()));
+      intRegIndex_++;
+      break;
+    }
+    case MIRType::Float32:
+    case MIRType::Double: {
+      if (floatRegIndex_ == NumFloatArgRegs) {
+        // Out of FP argument registers: allocate a stack slot.
+        current_ = ABIArg(stackOffset_);
+        stackOffset_ += sizeof(double);
+        break;
+      }
+      current_ = ABIArg(FloatRegister(
+          FloatRegisters::Encoding(floatRegIndex_ + fa0.encoding()),
+          type == MIRType::Double ? FloatRegisters::Double
+                                  : FloatRegisters::Single));
+      floatRegIndex_++;
+      break;
+    }
+    case MIRType::Simd128: {
+      MOZ_CRASH("RISCV64 does not support simd yet.");
+      break;
+    }
+    default:
+      MOZ_CRASH("Unexpected argument type");
+  }
+  return current_;
+}
+
+bool Assembler::oom() const {
+  // The assembler is out of memory if any underlying buffer or relocation
+  // table overflowed, or if the label cache could not keep up.
+  if (AssemblerShared::oom()) {
+    return true;
+  }
+  if (m_buffer.oom()) {
+    return true;
+  }
+  if (jumpRelocations_.oom() || dataRelocations_.oom()) {
+    return true;
+  }
+  return !enoughLabelCache_;
+}
+
+// Disassembles |instr| into the debug log (and, when |enable_spew| is set,
+// into the JitSpew codegen channel). Returns the decoded instruction size
+// as reported by the disassembler, or -1 when neither debug output nor
+// spew is enabled.
+int Assembler::disassembleInstr(Instr instr, bool enable_spew) {
+  if (!FLAG_riscv_debug && !enable_spew) return -1;
+  disasm::NameConverter converter;
+  disasm::Disassembler disasm(converter);
+  EmbeddedVector<char, 128> disasm_buffer;
+
+  int size =
+      disasm.InstructionDecode(disasm_buffer, reinterpret_cast<byte*>(&instr));
+  DEBUG_PRINTF("%s\n", disasm_buffer.start());
+  if (enable_spew) {
+    JitSpew(JitSpew_Codegen, "%s", disasm_buffer.start());
+  }
+  return size;
+}
+
+// Decodes the absolute 48-bit address materialized by a li_ptr sequence
+// (lui/addi/slli/ori/slli/ori) starting at |pc|, reversing the shifts used
+// at emission time. Crashes if the instructions do not match that pattern.
+// NOTE(review): |pc + 1 * kInstrSize| relies on pointer arithmetic over
+// Instruction advancing one byte per unit (i.e. sizeof(Instruction) == 1),
+// so "+ kInstrSize" moves forward exactly one 4-byte instruction — confirm
+// against constant/Base-constant-riscv.h.
+uintptr_t Assembler::target_address_at(Instruction* pc) {
+  Instruction* instr0 = pc;
+  DEBUG_PRINTF("target_address_at: pc: 0x%p\t", instr0);
+  Instruction* instr1 = pc + 1 * kInstrSize;
+  Instruction* instr2 = pc + 2 * kInstrSize;
+  Instruction* instr3 = pc + 3 * kInstrSize;
+  Instruction* instr4 = pc + 4 * kInstrSize;
+  Instruction* instr5 = pc + 5 * kInstrSize;
+
+  // Interpret instructions for address generated by li: See listing in
+  // Assembler::set_target_address_at() just below.
+  if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&
+      IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
+      IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
+      IsOri(*reinterpret_cast<Instr*>(instr3)) &&
+      IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
+      IsOri(*reinterpret_cast<Instr*>(instr5))) {
+    // Assemble the 64 bit value.
+    int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
+                   (int64_t)instr1->Imm12Value();
+    // The two slli shift amounts are fixed by li_ptr (11 then 6).
+    MOZ_ASSERT(instr2->Imm12Value() == 11);
+    addr <<= 11;
+    addr |= (int64_t)instr3->Imm12Value();
+    MOZ_ASSERT(instr4->Imm12Value() == 6);
+    addr <<= 6;
+    addr |= (int64_t)instr5->Imm12Value();
+
+    DEBUG_PRINTF("addr: %lx\n", addr);
+    return static_cast<uintptr_t>(addr);
+  }
+  // We should never get here, force a bad address if we do.
+  MOZ_CRASH("RISC-V UNREACHABLE");
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+                                        ImmPtr newValue, ImmPtr expectedValue) {
+  // Unwrap the typed immediates and delegate to the PatchedImmPtr overload.
+  PatchedImmPtr patched(newValue.value);
+  PatchedImmPtr expected(expectedValue.value);
+  PatchDataWithValueCheck(label, patched, expected);
+}
+
+// Replaces the 64-bit immediate loaded at |label| with |newValue|, first
+// asserting (debug-only) that the currently-encoded immediate matches
+// |expectedValue| so mismatched patch sites are caught early.
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+                                        PatchedImmPtr newValue,
+                                        PatchedImmPtr expectedValue) {
+  Instruction* inst = (Instruction*)label.raw();
+
+  // Extract old Value
+  DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
+  MOZ_ASSERT(value == uint64_t(expectedValue.value));
+
+  // Replace with new value
+  Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
+}
+
+// Decodes the 64-bit immediate materialized at |inst0|. Handles both load
+// shapes this assembler emits: the 8-instruction li_constant sequence
+// ("Li64", recognized by the addiw in the second slot) and the
+// 6-instruction li_ptr sequence ("Li48", delegated to target_address_at).
+// A leading jal is followed to its target first (out-of-line constant).
+// On an unrecognized pattern the instructions are dumped and we crash.
+uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
+  DEBUG_PRINTF("\tExtractLoad64Value: \tpc:%p ", inst0);
+  if (IsJal(*reinterpret_cast<Instr*>(inst0))) {
+    // The load was moved out of line; follow the jump to it.
+    int offset = inst0->Imm20JValue();
+    inst0 = inst0 + offset;
+  }
+  Instruction* instr1 = inst0 + 1 * kInstrSize;
+  if (IsAddiw(*reinterpret_cast<Instr*>(instr1))) {
+    // Li64
+    Instruction* instr2 = inst0 + 2 * kInstrSize;
+    Instruction* instr3 = inst0 + 3 * kInstrSize;
+    Instruction* instr4 = inst0 + 4 * kInstrSize;
+    Instruction* instr5 = inst0 + 5 * kInstrSize;
+    Instruction* instr6 = inst0 + 6 * kInstrSize;
+    Instruction* instr7 = inst0 + 7 * kInstrSize;
+    if (IsLui(*reinterpret_cast<Instr*>(inst0)) &&
+        IsAddiw(*reinterpret_cast<Instr*>(instr1)) &&
+        IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
+        IsAddi(*reinterpret_cast<Instr*>(instr3)) &&
+        IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
+        IsAddi(*reinterpret_cast<Instr*>(instr5)) &&
+        IsSlli(*reinterpret_cast<Instr*>(instr6)) &&
+        IsAddi(*reinterpret_cast<Instr*>(instr7))) {
+      // Reverse li_constant: accumulate 12 bits per slli/addi pair.
+      int64_t imm = (int64_t)(inst0->Imm20UValue() << kImm20Shift) +
+                    (int64_t)instr1->Imm12Value();
+      MOZ_ASSERT(instr2->Imm12Value() == 12);
+      imm <<= 12;
+      imm += (int64_t)instr3->Imm12Value();
+      MOZ_ASSERT(instr4->Imm12Value() == 12);
+      imm <<= 12;
+      imm += (int64_t)instr5->Imm12Value();
+      MOZ_ASSERT(instr6->Imm12Value() == 12);
+      imm <<= 12;
+      imm += (int64_t)instr7->Imm12Value();
+      DEBUG_PRINTF("imm:%lx\n", imm);
+      return imm;
+    } else {
+      // Unrecognized sequence: dump it for diagnosis, then crash.
+      FLAG_riscv_debug = true;
+      disassembleInstr(inst0->InstructionBits());
+      disassembleInstr(instr1->InstructionBits());
+      disassembleInstr(instr2->InstructionBits());
+      disassembleInstr(instr3->InstructionBits());
+      disassembleInstr(instr4->InstructionBits());
+      disassembleInstr(instr5->InstructionBits());
+      disassembleInstr(instr6->InstructionBits());
+      disassembleInstr(instr7->InstructionBits());
+      MOZ_CRASH();
+    }
+  } else {
+    DEBUG_PRINTF("\n");
+    // Debug-dump the surrounding window before decoding as Li48.
+    Instruction* instrf1 = (inst0 - 1 * kInstrSize);
+    Instruction* instr2 = inst0 + 2 * kInstrSize;
+    Instruction* instr3 = inst0 + 3 * kInstrSize;
+    Instruction* instr4 = inst0 + 4 * kInstrSize;
+    Instruction* instr5 = inst0 + 5 * kInstrSize;
+    Instruction* instr6 = inst0 + 6 * kInstrSize;
+    Instruction* instr7 = inst0 + 7 * kInstrSize;
+    disassembleInstr(instrf1->InstructionBits());
+    disassembleInstr(inst0->InstructionBits());
+    disassembleInstr(instr1->InstructionBits());
+    disassembleInstr(instr2->InstructionBits());
+    disassembleInstr(instr3->InstructionBits());
+    disassembleInstr(instr4->InstructionBits());
+    disassembleInstr(instr5->InstructionBits());
+    disassembleInstr(instr6->InstructionBits());
+    disassembleInstr(instr7->InstructionBits());
+    MOZ_ASSERT(IsAddi(*reinterpret_cast<Instr*>(instr1)));
+    // Li48
+    return target_address_at(inst0);
+  }
+}
+
+// Re-encodes the 64-bit immediate load at |pc| to materialize |value|,
+// patching the instruction words in place. Mirrors ExtractLoad64Value: a
+// leading jal is followed first; an addiw in the second slot means the
+// 8-instruction li_constant shape (patched field-by-field here with the
+// same bias constants li_constant uses), otherwise the 6-instruction
+// li_ptr shape is delegated to set_target_value_at.
+void Assembler::UpdateLoad64Value(Instruction* pc, uint64_t value) {
+  DEBUG_PRINTF("\tUpdateLoad64Value: pc: %p\tvalue: %lx\n", pc, value);
+  Instruction* instr1 = pc + 1 * kInstrSize;
+  if (IsJal(*reinterpret_cast<Instr*>(pc))) {
+    pc = pc + pc->Imm20JValue();
+    instr1 = pc + 1 * kInstrSize;
+  }
+  if (IsAddiw(*reinterpret_cast<Instr*>(instr1))) {
+    Instruction* instr0 = pc;
+    Instruction* instr2 = pc + 2 * kInstrSize;
+    Instruction* instr3 = pc + 3 * kInstrSize;
+    Instruction* instr4 = pc + 4 * kInstrSize;
+    Instruction* instr5 = pc + 5 * kInstrSize;
+    Instruction* instr6 = pc + 6 * kInstrSize;
+    Instruction* instr7 = pc + 7 * kInstrSize;
+    MOZ_ASSERT(IsLui(*reinterpret_cast<Instr*>(pc)) &&
+               IsAddiw(*reinterpret_cast<Instr*>(instr1)) &&
+               IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
+               IsAddi(*reinterpret_cast<Instr*>(instr3)) &&
+               IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
+               IsAddi(*reinterpret_cast<Instr*>(instr5)) &&
+               IsSlli(*reinterpret_cast<Instr*>(instr6)) &&
+               IsAddi(*reinterpret_cast<Instr*>(instr7)));
+    // lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>
+    // 48); // Bits 63:48
+    // addiw(rd, rd,
+    //       (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>
+    //           52); // Bits 47:36
+    // slli(rd, rd, 12);
+    // addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52); // Bits
+    // 35:24 slli(rd, rd, 12); addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52); //
+    // Bits 23:12 slli(rd, rd, 12); addi(rd, rd, imm << 52 >> 52); // Bits 11:0
+    // Keep opcode/rd (low 12 bits of lui, low 20 bits of I-type) and splice
+    // the new immediate fields in, biased exactly as li_constant does.
+    *reinterpret_cast<Instr*>(instr0) &= 0xfff;
+    *reinterpret_cast<Instr*>(instr0) |=
+        (((value + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >> 48)
+         << 12);
+    *reinterpret_cast<Instr*>(instr1) &= 0xfffff;
+    *reinterpret_cast<Instr*>(instr1) |=
+        (((value + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >> 52) << 20);
+    *reinterpret_cast<Instr*>(instr3) &= 0xfffff;
+    *reinterpret_cast<Instr*>(instr3) |=
+        (((value + (1LL << 23) + (1LL << 11)) << 28 >> 52) << 20);
+    *reinterpret_cast<Instr*>(instr5) &= 0xfffff;
+    *reinterpret_cast<Instr*>(instr5) |=
+        (((value + (1LL << 11)) << 40 >> 52) << 20);
+    *reinterpret_cast<Instr*>(instr7) &= 0xfffff;
+    *reinterpret_cast<Instr*>(instr7) |= ((value << 52 >> 52) << 20);
+    disassembleInstr(instr0->InstructionBits());
+    disassembleInstr(instr1->InstructionBits());
+    disassembleInstr(instr2->InstructionBits());
+    disassembleInstr(instr3->InstructionBits());
+    disassembleInstr(instr4->InstructionBits());
+    disassembleInstr(instr5->InstructionBits());
+    disassembleInstr(instr6->InstructionBits());
+    disassembleInstr(instr7->InstructionBits());
+    MOZ_ASSERT(ExtractLoad64Value(pc) == value);
+  } else {
+    Instruction* instr0 = pc;
+    Instruction* instr2 = pc + 2 * kInstrSize;
+    Instruction* instr3 = pc + 3 * kInstrSize;
+    Instruction* instr4 = pc + 4 * kInstrSize;
+    Instruction* instr5 = pc + 5 * kInstrSize;
+    Instruction* instr6 = pc + 6 * kInstrSize;
+    Instruction* instr7 = pc + 7 * kInstrSize;
+    disassembleInstr(instr0->InstructionBits());
+    disassembleInstr(instr1->InstructionBits());
+    disassembleInstr(instr2->InstructionBits());
+    disassembleInstr(instr3->InstructionBits());
+    disassembleInstr(instr4->InstructionBits());
+    disassembleInstr(instr5->InstructionBits());
+    disassembleInstr(instr6->InstructionBits());
+    disassembleInstr(instr7->InstructionBits());
+    MOZ_ASSERT(IsAddi(*reinterpret_cast<Instr*>(instr1)));
+    set_target_value_at(pc, value);
+  }
+}
+
+// Patches a li_ptr sequence at |pc| in place so it materializes the 48-bit
+// address |target|, leaving opcode/register fields untouched and rewriting
+// only the immediate fields of each instruction word.
+void Assembler::set_target_value_at(Instruction* pc, uint64_t target) {
+  DEBUG_PRINTF("\tset_target_value_at: pc: %p\ttarget: %lx\n", pc, target);
+  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+  MOZ_ASSERT((target & 0xffff000000000000ll) == 0);
+#ifdef DEBUG
+  // Check we have the result from a li macro-instruction.
+  Instruction* instr0 = pc;
+  Instruction* instr1 = pc + 1 * kInstrSize;
+  Instruction* instr3 = pc + 3 * kInstrSize;
+  Instruction* instr5 = pc + 5 * kInstrSize;
+  MOZ_ASSERT(IsLui(*reinterpret_cast<Instr*>(instr0)) &&
+             IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
+             IsOri(*reinterpret_cast<Instr*>(instr3)) &&
+             IsOri(*reinterpret_cast<Instr*>(instr5)));
+#endif
+  // Split the target exactly as li_ptr does at emission time.
+  int64_t a6 = target & 0x3f;                     // bits 0:5. 6 bits
+  int64_t b11 = (target >> 6) & 0x7ff;            // bits 6:11. 11 bits
+  int64_t high_31 = (target >> 17) & 0x7fffffff;  // 31 bits
+  int64_t high_20 = ((high_31 + 0x800) >> 12);    // 19 bits
+  int64_t low_12 = high_31 & 0xfff;               // 12 bits
+  // Keep the low opcode/rd bits of each word; splice in new immediates.
+  *p = *p & 0xfff;
+  *p = *p | ((int32_t)high_20 << 12);
+  *(p + 1) = *(p + 1) & 0xfffff;
+  *(p + 1) = *(p + 1) | ((int32_t)low_12 << 20);
+  *(p + 2) = *(p + 2) & 0xfffff;
+  *(p + 2) = *(p + 2) | (11 << 20);
+  *(p + 3) = *(p + 3) & 0xfffff;
+  *(p + 3) = *(p + 3) | ((int32_t)b11 << 20);
+  *(p + 4) = *(p + 4) & 0xfffff;
+  *(p + 4) = *(p + 4) | (6 << 20);
+  *(p + 5) = *(p + 5) & 0xfffff;
+  *(p + 5) = *(p + 5) | ((int32_t)a6 << 20);
+  MOZ_ASSERT(target_address_at(pc) == target);
+}
+
+// Writes a complete li_ptr-shaped sequence (lui/addi/slli/ori/slli/ori)
+// into raw code memory at |inst0|, loading the 48-bit |value| into |reg|.
+// Unlike set_target_value_at, this composes whole instruction words from
+// scratch rather than patching existing ones.
+void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
+                                        uint64_t value) {
+  DEBUG_PRINTF("\tWriteLoad64Instructions\n");
+  // Initialize rd with an address
+  // Pointers are 48 bits
+  // 6 fixed instructions are generated
+  MOZ_ASSERT((value & 0xfff0000000000000ll) == 0);
+  int64_t a6 = value & 0x3f;                     // bits 0:5. 6 bits
+  int64_t b11 = (value >> 6) & 0x7ff;            // bits 6:11. 11 bits
+  int64_t high_31 = (value >> 17) & 0x7fffffff;  // 31 bits
+  int64_t high_20 = ((high_31 + 0x800) >> 12);   // 19 bits
+  int64_t low_12 = high_31 & 0xfff;              // 12 bits
+  Instr lui_ = LUI | (reg.code() << kRdShift) |
+               ((int32_t)high_20 << kImm20Shift);  // lui(rd, (int32_t)high_20);
+  *reinterpret_cast<Instr*>(inst0) = lui_;
+
+  Instr addi_ =
+      OP_IMM | (reg.code() << kRdShift) | (0b000 << kFunct3Shift) |
+      (reg.code() << kRs1Shift) |
+      (low_12 << kImm12Shift);  // addi(rd, rd, low_12); // 31 bits in rd.
+  *reinterpret_cast<Instr*>(inst0 + 1 * kInstrSize) = addi_;
+
+  Instr slli_ =
+      OP_IMM | (reg.code() << kRdShift) | (0b001 << kFunct3Shift) |
+      (reg.code() << kRs1Shift) |
+      (11 << kImm12Shift);  // slli(rd, rd, 11); // Space for next 11 bits
+  *reinterpret_cast<Instr*>(inst0 + 2 * kInstrSize) = slli_;
+
+  Instr ori_b11 = OP_IMM | (reg.code() << kRdShift) | (0b110 << kFunct3Shift) |
+                  (reg.code() << kRs1Shift) |
+                  (b11 << kImm12Shift);  // ori(rd, rd, b11); // 11 bits
+                                         // are put in. 42 bits in rd
+  *reinterpret_cast<Instr*>(inst0 + 3 * kInstrSize) = ori_b11;
+
+  slli_ = OP_IMM | (reg.code() << kRdShift) | (0b001 << kFunct3Shift) |
+          (reg.code() << kRs1Shift) |
+          (6 << kImm12Shift);  // slli(rd, rd, 6); // Space for next 6 bits
+  *reinterpret_cast<Instr*>(inst0 + 4 * kInstrSize) =
+      slli_;  // slli(rd, rd, 6); // Space for next 6 bits
+
+  Instr ori_a6 = OP_IMM | (reg.code() << kRdShift) | (0b110 << kFunct3Shift) |
+                 (reg.code() << kRs1Shift) |
+                 (a6 << kImm12Shift);  // ori(rd, rd, a6); // 6 bits are
+                                       // put in. 48 bits in rd
+  *reinterpret_cast<Instr*>(inst0 + 5 * kInstrSize) = ori_a6;
+  disassembleInstr((inst0 + 0 * kInstrSize)->InstructionBits());
+  disassembleInstr((inst0 + 1 * kInstrSize)->InstructionBits());
+  disassembleInstr((inst0 + 2 * kInstrSize)->InstructionBits());
+  disassembleInstr((inst0 + 3 * kInstrSize)->InstructionBits());
+  disassembleInstr((inst0 + 4 * kInstrSize)->InstructionBits());
+  disassembleInstr((inst0 + 5 * kInstrSize)->InstructionBits());
+  disassembleInstr((inst0 + 6 * kInstrSize)->InstructionBits());
+  MOZ_ASSERT(ExtractLoad64Value(inst0) == value);
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. This is
+// only meant to function on code that has been invalidated, so it should
+// be totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary
+void Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+  // Raw is going to be the return address.
+  uint32_t* raw = (uint32_t*)label.raw();
+  // Overwrite the 4 bytes before the return address, which will
+  // end up being the call instruction.
+  *(raw - 1) = imm.value;
+}
+
+// Retargets the branch/jump/load instruction at |pos| so it refers to
+// |target_pos|. Handles conditional branches (BRANCH), direct jumps (JAL),
+// absolute loads (LUI-headed li sequences), and long-range auipc+jalr/addi
+// pairs; for the latter, |trampoline| permits shrinking the pair to a
+// single jal + nop when the offset fits in 21 bits.
+void Assembler::target_at_put(BufferOffset pos, BufferOffset target_pos,
+                              bool trampoline) {
+  if (m_buffer.oom()) {
+    return;
+  }
+  DEBUG_PRINTF("\ttarget_at_put: %p (%d) to %p (%d)\n",
+               reinterpret_cast<Instr*>(editSrc(pos)), pos.getOffset(),
+               reinterpret_cast<Instr*>(editSrc(pos)) + target_pos.getOffset() -
+                   pos.getOffset(),
+               target_pos.getOffset());
+  Instruction* instruction = editSrc(pos);
+  Instr instr = instruction->InstructionBits();
+  switch (instruction->InstructionOpcodeType()) {
+    case BRANCH: {
+      instr = SetBranchOffset(pos.getOffset(), target_pos.getOffset(), instr);
+      instr_at_put(pos, instr);
+    } break;
+    case JAL: {
+      MOZ_ASSERT(IsJal(instr));
+      instr = SetJalOffset(pos.getOffset(), target_pos.getOffset(), instr);
+      instr_at_put(pos, instr);
+    } break;
+    case LUI: {
+      // Absolute-address load: patch the full li_ptr sequence.
+      set_target_value_at(instruction,
+                          reinterpret_cast<uintptr_t>(editSrc(target_pos)));
+    } break;
+    case AUIPC: {
+      Instr instr_auipc = instr;
+      Instr instr_I =
+          editSrc(BufferOffset(pos.getOffset() + 4))->InstructionBits();
+      MOZ_ASSERT(IsJalr(instr_I) || IsAddi(instr_I));
+
+      intptr_t offset = target_pos.getOffset() - pos.getOffset();
+      if (is_int21(offset) && IsJalr(instr_I) && trampoline) {
+        // Short enough for a single jal; replace the pair with jal + nop.
+        MOZ_ASSERT(is_int21(offset) && ((offset & 1) == 0));
+        Instr instr = JAL;
+        instr = SetJalOffset(pos.getOffset(), target_pos.getOffset(), instr);
+        MOZ_ASSERT(IsJal(instr));
+        MOZ_ASSERT(JumpOffset(instr) == offset);
+        instr_at_put(pos, instr);
+        instr_at_put(BufferOffset(pos.getOffset() + 4), kNopByte);
+      } else {
+        // Split the 32-bit offset into the biased Hi20/Lo12 auipc form.
+        MOZ_RELEASE_ASSERT(is_int32(offset + 0x800));
+        MOZ_ASSERT(instruction->RdValue() ==
+                   editSrc(BufferOffset(pos.getOffset() + 4))->Rs1Value());
+        int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+        int32_t Lo12 = (int32_t)offset << 20 >> 20;
+
+        instr_auipc =
+            (instr_auipc & ~kImm31_12Mask) | ((Hi20 & kImm19_0Mask) << 12);
+        instr_at_put(pos, instr_auipc);
+
+        const int kImm31_20Mask = ((1 << 12) - 1) << 20;
+        const int kImm11_0Mask = ((1 << 12) - 1);
+        instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
+        instr_at_put(BufferOffset(pos.getOffset() + 4), instr_I);
+      }
+    } break;
+    default:
+      UNIMPLEMENTED_RISCV();
+      break;
+  }
+}
+
+// Sentinels marking the end of a label's linked chain of uses:
+// kEndOfChain is the value target_at() returns to callers, while
+// kEndOfJumpChain is the in-instruction encoding (a zero offset).
+const int kEndOfChain = -1;
+const int32_t kEndOfJumpChain = 0;
+
+int Assembler::target_at(BufferOffset pos, bool is_internal) {
+  // When out of memory, report the chain as ended so callers stop walking.
+  if (oom()) {
+    return kEndOfChain;
+  }
+  Instruction* instr = editSrc(pos);
+  // An auipc heads a two-instruction (auipc + jalr/addi) sequence; fetch
+  // the second word so the overload can decode the combined offset.
+  Instruction* pairedInstr = nullptr;
+  if (IsAuipc(instr->InstructionBits())) {
+    pairedInstr = editSrc(BufferOffset(pos.getOffset() + kInstrSize));
+  }
+  return target_at(instr, pos, is_internal, pairedInstr);
+}
+
+// Returns the buffer offset that the (possibly unbound) branch/jump at
+// |pos| currently points to, i.e. the next link in the label's use chain,
+// or kEndOfChain when the encoded offset is the end-of-chain sentinel.
+// |instruction2| must be the following word when |instruction| is an auipc.
+int Assembler::target_at(Instruction* instruction, BufferOffset pos,
+                         bool is_internal, Instruction* instruction2) {
+  DEBUG_PRINTF("\t target_at: %p(%x)\n\t",
+               reinterpret_cast<Instr*>(instruction), pos.getOffset());
+  disassembleInstr(instruction->InstructionBits());
+  Instr instr = instruction->InstructionBits();
+  switch (instruction->InstructionOpcodeType()) {
+    case BRANCH: {
+      int32_t imm13 = BranchOffset(instr);
+      if (imm13 == kEndOfJumpChain) {
+        // EndOfChain sentinel is returned directly, not relative to pc or pos.
+        return kEndOfChain;
+      } else {
+        DEBUG_PRINTF("\t target_at: %d %d\n", imm13, pos.getOffset() + imm13);
+        return pos.getOffset() + imm13;
+      }
+    }
+    case JAL: {
+      int32_t imm21 = JumpOffset(instr);
+      if (imm21 == kEndOfJumpChain) {
+        // EndOfChain sentinel is returned directly, not relative to pc or pos.
+        return kEndOfChain;
+      } else {
+        DEBUG_PRINTF("\t target_at: %d %d\n", imm21, pos.getOffset() + imm21);
+        return pos.getOffset() + imm21;
+      }
+    }
+    case JALR: {
+      // I-type immediate lives in the top 12 bits.
+      int32_t imm12 = instr >> 20;
+      if (imm12 == kEndOfJumpChain) {
+        // EndOfChain sentinel is returned directly, not relative to pc or pos.
+        return kEndOfChain;
+      } else {
+        DEBUG_PRINTF("\t target_at: %d %d\n", imm12, pos.getOffset() + imm12);
+        return pos.getOffset() + imm12;
+      }
+    }
+    case LUI: {
+      // Absolute-address load: recover the buffer offset from the decoded
+      // absolute address relative to this instruction's address.
+      uintptr_t imm = target_address_at(instruction);
+      uintptr_t instr_address = reinterpret_cast<uintptr_t>(instruction);
+      if (imm == kEndOfJumpChain) {
+        return kEndOfChain;
+      } else {
+        MOZ_ASSERT(instr_address - imm < INT_MAX);
+        int32_t delta = static_cast<int32_t>(instr_address - imm);
+        MOZ_ASSERT(pos.getOffset() > delta);
+        return pos.getOffset() - delta;
+      }
+    }
+    case AUIPC: {
+      MOZ_ASSERT(instruction2 != nullptr);
+      Instr instr_auipc = instr;
+      Instr instr_I = instruction2->InstructionBits();
+      MOZ_ASSERT(IsJalr(instr_I) || IsAddi(instr_I));
+      int32_t offset = BrachlongOffset(instr_auipc, instr_I);
+      if (offset == kEndOfJumpChain) return kEndOfChain;
+      DEBUG_PRINTF("\t target_at: %d %d\n", offset, pos.getOffset() + offset);
+      return offset + pos.getOffset();
+    }
+    default: {
+      UNIMPLEMENTED_RISCV();
+    }
+  }
+}
+
+// Advances label |L| to the next use in its link chain. Returns the next
+// use's offset (and updates L to point at it), or INVALID_OFFSET after
+// resetting L when the chain is exhausted.
+uint32_t Assembler::next_link(Label* L, bool is_internal) {
+  MOZ_ASSERT(L->used());
+  BufferOffset pos(L);
+  int link = target_at(pos, is_internal);
+  if (link == kEndOfChain) {
+    L->reset();
+    return LabelBase::INVALID_OFFSET;
+  } else {
+    MOZ_ASSERT(link >= 0);
+    DEBUG_PRINTF("next: %p to offset %d\n", L, link);
+    L->use(link);
+    return link;
+  }
+}
+
+// Binds |label| to |boff| (or, when unassigned, to the next instruction),
+// then walks the label's use chain and patches every referring branch/jump
+// to the bound destination. Uses whose short-form range cannot reach the
+// destination are left for an already-emitted trampoline (the next link in
+// the chain must then be an auipc+jalr pair); reachable uses are patched
+// directly and their pending branch deadlines are unregistered.
+void Assembler::bind(Label* label, BufferOffset boff) {
+  JitSpew(JitSpew_Codegen, ".set Llabel %p %d", label, currentOffset());
+  DEBUG_PRINTF(".set Llabel %p\n", label);
+  // If our caller didn't give us an explicit target to bind to
+  // then we want to bind to the location of the next instruction
+  BufferOffset dest = boff.assigned() ? boff : nextOffset();
+  if (label->used()) {
+    uint32_t next;
+
+    // A used label holds a link to branch that uses it.
+    do {
+      BufferOffset b(label);
+      DEBUG_PRINTF("\tbind next:%d\n", b.getOffset());
+      // Even a 0 offset may be invalid if we're out of memory.
+      if (oom()) {
+        return;
+      }
+      int fixup_pos = b.getOffset();
+      int dist = dest.getOffset() - fixup_pos;
+      next = next_link(label, false);
+      DEBUG_PRINTF("\t%p fixup: %d next: %d\n", label, fixup_pos, next);
+      DEBUG_PRINTF("\t fixup: %d dest: %d dist: %d %d %d\n", fixup_pos,
+                   dest.getOffset(), dist, nextOffset().getOffset(),
+                   currentOffset());
+      Instruction* instruction = editSrc(b);
+      Instr instr = instruction->InstructionBits();
+      if (IsBranch(instr)) {
+        if (dist > kMaxBranchOffset) {
+          // Out of conditional-branch range: the chain must continue to a
+          // nearby auipc+jalr trampoline that can cover the distance.
+          MOZ_ASSERT(next != LabelBase::INVALID_OFFSET);
+          MOZ_RELEASE_ASSERT((next - fixup_pos) <= kMaxBranchOffset);
+          MOZ_ASSERT(IsAuipc(editSrc(BufferOffset(next))->InstructionBits()));
+          MOZ_ASSERT(
+              IsJalr(editSrc(BufferOffset(next + 4))->InstructionBits()));
+          DEBUG_PRINTF("\t\ttrampolining: %d\n", next);
+        } else {
+          target_at_put(b, dest);
+          BufferOffset deadline(b.getOffset() +
+                                ImmBranchMaxForwardOffset(CondBranchRangeType));
+          m_buffer.unregisterBranchDeadline(CondBranchRangeType, deadline);
+        }
+      } else if (IsJal(instr)) {
+        if (dist > kMaxJumpOffset) {
+          // Out of jal range: same trampoline expectation as above.
+          MOZ_ASSERT(next != LabelBase::INVALID_OFFSET);
+          MOZ_RELEASE_ASSERT((next - fixup_pos) <= kMaxJumpOffset);
+          MOZ_ASSERT(IsAuipc(editSrc(BufferOffset(next))->InstructionBits()));
+          MOZ_ASSERT(
+              IsJalr(editSrc(BufferOffset(next + 4))->InstructionBits()));
+          DEBUG_PRINTF("\t\ttrampolining: %d\n", next);
+        } else {
+          target_at_put(b, dest);
+          BufferOffset deadline(
+              b.getOffset() + ImmBranchMaxForwardOffset(UncondBranchRangeType));
+          m_buffer.unregisterBranchDeadline(UncondBranchRangeType, deadline);
+        }
+      } else {
+        // auipc-based long branch: always within +/-2GB, patch directly.
+        MOZ_ASSERT(IsAuipc(instr));
+        target_at_put(b, dest);
+      }
+    } while (next != LabelBase::INVALID_OFFSET);
+  }
+  label->bind(dest.getOffset());
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+  // Nothing to patch until the label's patch location has been bound.
+  if (!label.patchAt().bound()) {
+    return;
+  }
+  auto mode = label.linkMode();
+  intptr_t offset = label.patchAt().offset();
+  intptr_t target = label.target().offset();
+
+  if (mode == CodeLabel::RawPointer) {
+    // Store the absolute target address directly into the code stream.
+    *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+    return;
+  }
+  MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+             mode == CodeLabel::JumpImmediate);
+  // Patch the materialized 64-bit immediate load in place.
+  Instruction* inst = (Instruction*)(rawCode + offset);
+  Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
+}
+
+bool Assembler::is_near(Label* L) {
+  MOZ_ASSERT(L->bound());
+  // A bound label is "near" when a jal-width immediate can reach it.
+  auto delta = currentOffset() - L->offset();
+  return is_intn(delta, kJumpOffsetBits);
+}
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+  // Null or unbound labels are optimistically treated as reachable.
+  if (L == nullptr || !L->bound()) {
+    return true;
+  }
+  auto delta = currentOffset() - L->offset();
+  return is_intn(delta, bits);
+}
+
+bool Assembler::is_near_branch(Label* L) {
+  MOZ_ASSERT(L->bound());
+  // Reachable by a conditional branch when the distance fits its immediate.
+  auto delta = currentOffset() - L->offset();
+  return is_intn(delta, kBranchOffsetBits);
+}
+
+// Computes the 32-bit pc-relative offset for an auipc+jalr long-branch pair
+// about to be emitted at nextInstrOffset(2). For a bound label, returns the
+// real offset; for an unbound label, links this site into the label's use
+// chain (via the label cache, since the chain is threaded through the
+// instructions themselves) and returns the end-of-chain sentinel.
+int32_t Assembler::branch_long_offset(Label* L) {
+  if (oom()) {
+    return kEndOfJumpChain;
+  }
+  intptr_t target_pos;
+  BufferOffset next_instr_offset = nextInstrOffset(2);
+  DEBUG_PRINTF("\tbranch_long_offset: %p to (%d)\n", L,
+               next_instr_offset.getOffset());
+  if (L->bound()) {
+    JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+            next_instr_offset.getOffset());
+    target_pos = L->offset();
+  } else {
+    if (L->used()) {
+      // Append this site to the existing chain: patch the previous tail to
+      // point here, then record this site as the new tail in the cache.
+      LabelCahe::Ptr p = label_cache_.lookup(L->offset());
+      MOZ_ASSERT(p);
+      MOZ_ASSERT(p->key() == L->offset());
+      target_pos = p->value().getOffset();
+      target_at_put(BufferOffset(target_pos), next_instr_offset);
+      DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+                   next_instr_offset.getOffset());
+      bool ok = label_cache_.put(L->offset(), next_instr_offset);
+      if (!ok) {
+        NoEnoughLabelCache();
+      }
+      return kEndOfJumpChain;
+    } else {
+      JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+              next_instr_offset.getOffset());
+      L->use(next_instr_offset.getOffset());
+      DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+                   next_instr_offset.getOffset());
+      bool ok = label_cache_.putNew(L->offset(), next_instr_offset);
+      if (!ok) {
+        NoEnoughLabelCache();
+      }
+      return kEndOfJumpChain;
+    }
+  }
+  intptr_t offset = target_pos - next_instr_offset.getOffset();
+  MOZ_ASSERT((offset & 3) == 0);
+  MOZ_ASSERT(is_int32(offset));
+  return static_cast<int32_t>(offset);
+}
+
+// Computes the pc-relative offset (of width |bits|) for a short branch/jump
+// about to be emitted at nextInstrOffset(). Bound labels yield the real
+// offset; unbound labels register a branch deadline (so a trampoline can be
+// inserted before the range is exceeded), link this site into the label's
+// chain via the label cache, and yield the end-of-chain sentinel.
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
+  if (oom()) {
+    return kEndOfJumpChain;
+  }
+  int32_t target_pos;
+  BufferOffset next_instr_offset = nextInstrOffset();
+  DEBUG_PRINTF("\tbranch_offset_helper: %p to %d\n", L,
+               next_instr_offset.getOffset());
+  // This is the last possible branch target.
+  if (L->bound()) {
+    JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+            next_instr_offset.getOffset());
+    target_pos = L->offset();
+  } else {
+    BufferOffset deadline(next_instr_offset.getOffset() +
+                          ImmBranchMaxForwardOffset(bits));
+    DEBUG_PRINTF("\tregisterBranchDeadline %d type %d\n", deadline.getOffset(),
+                 OffsetSizeToImmBranchRangeType(bits));
+    m_buffer.registerBranchDeadline(OffsetSizeToImmBranchRangeType(bits),
+                                    deadline);
+    if (L->used()) {
+      // Append this site to the existing chain (see branch_long_offset).
+      LabelCahe::Ptr p = label_cache_.lookup(L->offset());
+      MOZ_ASSERT(p);
+      MOZ_ASSERT(p->key() == L->offset());
+      target_pos = p->value().getOffset();
+      target_at_put(BufferOffset(target_pos), next_instr_offset);
+      DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+                   next_instr_offset.getOffset());
+      bool ok = label_cache_.put(L->offset(), next_instr_offset);
+      if (!ok) {
+        NoEnoughLabelCache();
+      }
+      return kEndOfJumpChain;
+    } else {
+      JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
+              next_instr_offset.getOffset());
+      L->use(next_instr_offset.getOffset());
+      bool ok = label_cache_.putNew(L->offset(), next_instr_offset);
+      if (!ok) {
+        NoEnoughLabelCache();
+      }
+      DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
+                   next_instr_offset.getOffset());
+      return kEndOfJumpChain;
+    }
+  }
+
+  int32_t offset = target_pos - next_instr_offset.getOffset();
+  DEBUG_PRINTF("\toffset = %d\n", offset);
+  MOZ_ASSERT(is_intn(offset, bits));
+  MOZ_ASSERT((offset & 1) == 0);
+  return offset;
+}
+
+// Maps an integer condition onto its logical negation. Conditions come in
+// involutive pairs, so inverting twice returns the original condition.
+Assembler::Condition Assembler::InvertCondition(Condition cond) {
+  switch (cond) {
+    // Zero tests.
+    case Zero:
+      return NonZero;
+    case NonZero:
+      return Zero;
+    // Equality.
+    case Equal:
+      return NotEqual;
+    case NotEqual:
+      return Equal;
+    // Sign tests.
+    case Signed:
+      return NotSigned;
+    case NotSigned:
+      return Signed;
+    // Signed orderings.
+    case LessThan:
+      return GreaterThanOrEqual;
+    case GreaterThanOrEqual:
+      return LessThan;
+    case LessThanOrEqual:
+      return GreaterThan;
+    case GreaterThan:
+      return LessThanOrEqual;
+    // Unsigned orderings.
+    case Below:
+      return AboveOrEqual;
+    case AboveOrEqual:
+      return Below;
+    case BelowOrEqual:
+      return Above;
+    case Above:
+      return BelowOrEqual;
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// Maps a floating-point condition onto its logical negation. Because NaN
+// compares unordered, negating an "ordered" condition yields its
+// "...OrUnordered" counterpart and vice versa.
+Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
+  switch (cond) {
+    case DoubleOrdered:
+      return DoubleUnordered;
+    case DoubleUnordered:
+      return DoubleOrdered;
+    case DoubleEqual:
+      return DoubleNotEqualOrUnordered;
+    case DoubleNotEqualOrUnordered:
+      return DoubleEqual;
+    case DoubleNotEqual:
+      return DoubleEqualOrUnordered;
+    case DoubleEqualOrUnordered:
+      return DoubleNotEqual;
+    case DoubleGreaterThan:
+      return DoubleLessThanOrEqualOrUnordered;
+    case DoubleLessThanOrEqualOrUnordered:
+      return DoubleGreaterThan;
+    case DoubleGreaterThanOrEqual:
+      return DoubleLessThanOrUnordered;
+    case DoubleLessThanOrUnordered:
+      return DoubleGreaterThanOrEqual;
+    case DoubleLessThan:
+      return DoubleGreaterThanOrEqualOrUnordered;
+    case DoubleGreaterThanOrEqualOrUnordered:
+      return DoubleLessThan;
+    case DoubleLessThanOrEqual:
+      return DoubleGreaterThanOrUnordered;
+    case DoubleGreaterThanOrUnordered:
+      return DoubleLessThanOrEqual;
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// Break / Trap instructions.
+// Emits an ebreak trap followed by a lui into the zero register whose
+// 20-bit immediate carries |code|; the lui is architecturally a no-op but
+// lets the simulator/debugger recover the break code.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+  // We need to invalidate breaks that could be stops as well because the
+  // simulator expects a char pointer after the stop instruction.
+  // See constants-mips.h for explanation.
+  MOZ_ASSERT(
+      (break_as_stop && code <= kMaxStopCode && code > kMaxTracepointCode) ||
+      (!break_as_stop && (code > kMaxStopCode || code <= kMaxTracepointCode)));
+
+  // since ebreak does not allow additional immediate field, we use the
+  // immediate field of lui instruction immediately following the ebreak to
+  // encode the "code" info
+  ebreak();
+  MOZ_ASSERT(is_uint20(code));
+  lui(zero_reg, code);
+}
+
+// Flips a toggled-jump site from its disabled form (`addi zero, zero, off`)
+// to an enabled short jump (`jal zero, off`). The addi's 12-bit immediate is
+// scattered into the J-type immediate fields of the jal encoding.
+void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
+  Instruction* inst = (Instruction*)inst_.raw();
+  MOZ_ASSERT(IsAddi(inst->InstructionBits()));
+  int32_t offset = inst->Imm12Value();
+  MOZ_ASSERT(is_int12(offset));
+  // J-type immediate layout: imm[19:12] | imm[11] | imm[10:1] | imm[20].
+  Instr jal_ = JAL | (0b000 << kFunct3Shift) |
+               (offset & 0xff000) |          // bits  19-12
+               ((offset & 0x800) << 9) |     // bit   11
+               ((offset & 0x7fe) << 20) |    // bits  10-1
+               ((offset & 0x100000) << 11);  // bit   20
+  // jal(zero, offset);
+  *reinterpret_cast<Instr*>(inst) = jal_;
+}
+
+// Inverse of ToggleToJmp: turns an enabled short jump back into the inert
+// `addi zero, zero, off` form, preserving the offset in the I-type immediate.
+void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
+  Instruction* inst = (Instruction*)inst_.raw();
+
+  // toggledJump is always used for short jumps.
+  MOZ_ASSERT(IsJal(inst->InstructionBits()));
+  // Replace "jal zero_reg, offset" with "addi $zero, $zero, offset"
+  int32_t offset = inst->Imm20JValue();
+  // The offset must fit the 12-bit addi immediate to round-trip losslessly.
+  MOZ_ASSERT(is_int12(offset));
+  Instr addi_ = OP_IMM | (0b000 << kFunct3Shift) |
+                (offset << kImm12Shift);  // addi(zero, zero, low_12);
+  *reinterpret_cast<Instr*>(inst) = addi_;
+}
+
+// Pre-reservation hook; returns false only if the buffer is already OOM.
+bool Assembler::reserve(size_t size) {
+  // This buffer uses fixed-size chunks so there's no point in reserving
+  // now vs. on-demand.
+  return !oom();
+}
+
+// Recovers the JitCode object targeted by a patchable 64-bit load-and-jump
+// sequence starting at `jump`.
+static JitCode* CodeFromJump(Instruction* jump) {
+  uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
+  return JitCode::FromExecutable(target);
+}
+
+// Traces every JitCode referenced from the jump-relocation table. `reader`
+// yields byte offsets (into `code`) of patchable jump sites.
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+                                     CompactBufferReader& reader) {
+  while (reader.more()) {
+    JitCode* child =
+        CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+    TraceManuallyBarrieredEdge(trc, &child, "rel32");
+  }
+}
+
+// Traces a single embedded 64-bit immediate (a GC pointer or a boxed Value)
+// and, if the GC moved the referent, patches the new address back into the
+// instruction stream (making the code writable on first patch).
+static void TraceOneDataRelocation(JSTracer* trc,
+                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
+                                   JitCode* code, Instruction* inst) {
+  void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
+  void* prior = ptr;
+
+  // Data relocations can be for Values or for raw pointers. If a Value is
+  // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+  // is not zero-tagged, we have to interpret it as a Value to ensure that the
+  // tag bits are masked off to recover the actual pointer.
+  uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+  if (word >> JSVAL_TAG_SHIFT) {
+    // This relocation is a Value with a non-zero tag.
+    Value v = Value::fromRawBits(word);
+    TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+    ptr = (void*)v.bitsAsPunboxPointer();
+  } else {
+    // This relocation is a raw pointer or a Value with a zero tag.
+    // No barrier needed since these are constants.
+    TraceManuallyBarrieredGenericPointerEdge(
+        trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+  }
+
+  if (ptr != prior) {
+    // Lazily make the code writable: most traces patch nothing.
+    if (awjc.isNothing()) {
+      awjc.emplace(code);
+    }
+    Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
+  }
+}
+
+/* static */
+// Walks the data-relocation table (byte offsets into `code`) and traces each
+// embedded pointer/Value via TraceOneDataRelocation.
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+                                     CompactBufferReader& reader) {
+  mozilla::Maybe<AutoWritableJitCode> awjc;
+  while (reader.more()) {
+    size_t offset = reader.readUnsigned();
+    Instruction* inst = (Instruction*)(code->raw() + offset);
+    TraceOneDataRelocation(trc, awjc, code, inst);
+  }
+}
+
+// RAII scope over the assembler's scratch-register pool: remembers the pool
+// on entry and restores it on exit, so registers acquired inside the scope
+// are returned automatically.
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+    : available_(assembler->GetScratchRegisterList()),
+      old_available_(*available_) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+  *available_ = old_available_;
+}
+
+// Removes and returns the lowest-numbered available scratch register.
+// Asserts (debug-only) that the pool is non-empty.
+Register UseScratchRegisterScope::Acquire() {
+  MOZ_ASSERT(available_ != nullptr);
+  MOZ_ASSERT(!available_->empty());
+  Register index = GeneralRegisterSet::FirstRegister(available_->bits());
+  available_->takeRegisterIndex(index);
+  return index;
+}
+
+// True if at least one scratch register remains acquirable.
+bool UseScratchRegisterScope::hasAvailable() const {
+  return (available_->size()) != 0;
+}
+
+// Redirects every use of `label` to `target`: binds them if target is bound,
+// otherwise splices label's use chain into target's. `label` is reset either
+// way.
+void Assembler::retarget(Label* label, Label* target) {
+  spew("retarget %p -> %p", label, target);
+  if (label->used() && !oom()) {
+    if (target->bound()) {
+      bind(label, BufferOffset(target));
+    } else if (target->used()) {
+      // The target is not bound but used. Prepend label's branch list
+      // onto target's.
+      int32_t next;
+      BufferOffset labelBranchOffset(label);
+
+      // Find the head of the use chain for label.
+      // NOTE(review): after this loop `next == INVALID_OFFSET`, so
+      // `labelBranchOffset` is constructed from the sentinel rather than the
+      // last real link; combined with the MOZ_CRASH("check") below this
+      // branch looks unfinished — confirm before relying on it.
+      do {
+        next = next_link(label, false);
+        labelBranchOffset = BufferOffset(next);
+      } while (next != LabelBase::INVALID_OFFSET);
+
+      // Then patch the head of label's use chain to the tail of
+      // target's use chain, prepending the entire use chain of target.
+      target->use(label->offset());
+      target_at_put(labelBranchOffset, BufferOffset(target));
+      MOZ_CRASH("check");
+    } else {
+      // The target is unbound and unused. We can just take the head of
+      // the list hanging off of label, and dump that into target.
+      target->use(label->offset());
+    }
+  }
+  label->reset();
+}
+
+// Copies `numBytes` of pre-assembled machine code into the buffer, feeding it
+// in SliceSize chunks so each write fits within one buffer slice. Returns
+// false on OOM.
+bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
+  if (m_buffer.oom()) {
+    return false;
+  }
+  while (numBytes > SliceSize) {
+    m_buffer.putBytes(SliceSize, code);
+    numBytes -= SliceSize;
+    code += SliceSize;
+  }
+  m_buffer.putBytes(numBytes, code);
+  return !m_buffer.oom();
+}
+
+// Enables or disables a patchable call site: instructions 0-5 materialize the
+// 64-bit callee address (lui/addi/slli/ori/slli/ori) and instruction 6 is
+// toggled between `jalr ra, <addr-reg>` (enabled) and a nop (disabled).
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+  Instruction* i0 = (Instruction*)inst_.raw();
+  Instruction* i1 = (Instruction*)(inst_.raw() + 1 * kInstrSize);
+  Instruction* i2 = (Instruction*)(inst_.raw() + 2 * kInstrSize);
+  Instruction* i3 = (Instruction*)(inst_.raw() + 3 * kInstrSize);
+  Instruction* i4 = (Instruction*)(inst_.raw() + 4 * kInstrSize);
+  Instruction* i5 = (Instruction*)(inst_.raw() + 5 * kInstrSize);
+  Instruction* i6 = (Instruction*)(inst_.raw() + 6 * kInstrSize);
+
+  MOZ_ASSERT(IsLui(i0->InstructionBits()));
+  MOZ_ASSERT(IsAddi(i1->InstructionBits()));
+  MOZ_ASSERT(IsSlli(i2->InstructionBits()));
+  MOZ_ASSERT(IsOri(i3->InstructionBits()));
+  MOZ_ASSERT(IsSlli(i4->InstructionBits()));
+  MOZ_ASSERT(IsOri(i5->InstructionBits()));
+  if (enabled) {
+    // jalr ra, 0(rd-of-i5): the final ori's destination holds the address.
+    Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
+                  (i5->RdValue() << kRs1Shift) | (0x0 << kImm12Shift);
+    *((Instr*)i6) = jalr_;
+  } else {
+    *((Instr*)i6) = kNopByte;
+  }
+}
+
+// Called by the assembler buffer when a short-range branch is about to go out
+// of range of its (still unlinked) label: writes a two-instruction long-jump
+// veneer (auipc t6 / jalr zero, t6) at `veneer`, splices the veneer into the
+// label's use chain in place of the branch, and repoints the branch at the
+// veneer. The branch position is reconstructed from (rangeIdx, deadline).
+void Assembler::PatchShortRangeBranchToVeneer(Buffer* buffer, unsigned rangeIdx,
+                                              BufferOffset deadline,
+                                              BufferOffset veneer) {
+  if (buffer->oom()) {
+    return;
+  }
+  DEBUG_PRINTF("\tPatchShortRangeBranchToVeneer\n");
+  // Reconstruct the position of the branch from (rangeIdx, deadline).
+  ImmBranchRangeType branchRange = static_cast<ImmBranchRangeType>(rangeIdx);
+  BufferOffset branch(deadline.getOffset() -
+                      ImmBranchMaxForwardOffset(branchRange));
+  Instruction* branchInst = buffer->getInst(branch);
+  Instruction* veneerInst_1 = buffer->getInst(veneer);
+  Instruction* veneerInst_2 =
+      buffer->getInst(BufferOffset(veneer.getOffset() + 4));
+  // Verify that the branch range matches what's encoded.
+  DEBUG_PRINTF("\t%p(%x): ", branchInst, branch.getOffset());
+  disassembleInstr(branchInst->InstructionBits(), JitSpew_Codegen);
+  DEBUG_PRINTF("\t insert veneer %x, branch:%x deadline: %x\n",
+               veneer.getOffset(), branch.getOffset(), deadline.getOffset());
+  MOZ_ASSERT(branchRange <= UncondBranchRangeType);
+  MOZ_ASSERT(branchInst->GetImmBranchRangeType() == branchRange);
+  // Emit a long jump slot: auipc t6, hi20 ; jalr zero, lo12(t6).
+  Instr auipc = AUIPC | (t6.code() << kRdShift) | (0x0 << kImm20Shift);
+  Instr jalr = JALR | (zero_reg.code() << kRdShift) | (0x0 << kFunct3Shift) |
+               (t6.code() << kRs1Shift) | (0x0 << kImm12Shift);
+
+  // We want to insert veneer after branch in the linked list of instructions
+  // that use the same unbound label.
+  // The veneer should be an unconditional branch.
+  int32_t nextElemOffset = target_at(buffer->getInst(branch), branch, false);
+  int32_t dist;
+  // kEndOfChain marks the end of the label's use chain; a veneer at the end
+  // of the chain encodes a zero displacement.
+  if (nextElemOffset != kEndOfChain) {
+    // Make the offset relative to veneer so it targets the same instruction
+    // as branchInst.
+    dist = nextElemOffset - veneer.getOffset();
+  } else {
+    dist = 0;
+  }
+  // Split the displacement into auipc hi20 / jalr lo12, rounding hi20 so the
+  // sign-extended lo12 reconstructs the exact distance.
+  int32_t Hi20 = (((int32_t)dist + 0x800) >> 12);
+  int32_t Lo12 = (int32_t)dist << 20 >> 20;
+  auipc = SetAuipcOffset(Hi20, auipc);
+  jalr = SetJalrOffset(Lo12, jalr);
+  // insert veneer
+  veneerInst_1->SetInstructionBits(auipc);
+  veneerInst_2->SetInstructionBits(jalr);
+  // Now link branchInst to veneer.
+  if (IsBranch(branchInst->InstructionBits())) {
+    branchInst->SetInstructionBits(SetBranchOffset(
+        branch.getOffset(), veneer.getOffset(), branchInst->InstructionBits()));
+  } else {
+    MOZ_ASSERT(IsJal(branchInst->InstructionBits()));
+    branchInst->SetInstructionBits(SetJalOffset(
+        branch.getOffset(), veneer.getOffset(), branchInst->InstructionBits()));
+  }
+  DEBUG_PRINTF("\tfix to veneer:");
+  disassembleInstr(branchInst->InstructionBits());
+}
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/Assembler-riscv64.h b/js/src/jit/riscv64/Assembler-riscv64.h
new file mode 100644
index 0000000000..4086b38ff7
--- /dev/null
+++ b/js/src/jit/riscv64/Assembler-riscv64.h
@@ -0,0 +1,685 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#ifndef jit_riscv64_Assembler_riscv64_h
+#define jit_riscv64_Assembler_riscv64_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Sprintf.h"
+
+#include <stdint.h>
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/JitSpewer.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/extension/base-riscv-i.h"
+#include "jit/riscv64/extension/extension-riscv-a.h"
+#include "jit/riscv64/extension/extension-riscv-c.h"
+#include "jit/riscv64/extension/extension-riscv-d.h"
+#include "jit/riscv64/extension/extension-riscv-f.h"
+#include "jit/riscv64/extension/extension-riscv-m.h"
+#include "jit/riscv64/extension/extension-riscv-v.h"
+#include "jit/riscv64/extension/extension-riscv-zicsr.h"
+#include "jit/riscv64/extension/extension-riscv-zifencei.h"
+#include "jit/riscv64/Register-riscv64.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+#include "js/HashTable.h"
+#include "wasm/WasmTypeDecls.h"
+namespace js {
+namespace jit {
+
+// RAII scopes granting exclusive use of the platform's dedicated scratch
+// registers (float32 / double / integer) for the lifetime of the scope.
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+  explicit ScratchFloat32Scope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+  explicit ScratchDoubleScope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+struct ScratchRegisterScope : public AutoRegisterScope {
+  explicit ScratchRegisterScope(MacroAssembler& masm)
+      : AutoRegisterScope(masm, ScratchRegister) {}
+};
+
+class MacroAssembler;
+
+// Imm64 halves: firstHalf is the low 32 bits, secondHalf the high 32 bits.
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+// Stack/code alignment constants for this port.
+// NOTE(review): the RISC-V psABI mandates 16-byte stack alignment at calls;
+// ABIStackAlignment == 8 here looks suspect — confirm against the trampoline
+// and ABI call paths before changing.
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 8;
+static constexpr uint32_t JitStackValueAlignment =
+    JitStackAlignment / sizeof(Value);
+static const uint32_t WasmStackAlignment = 16;
+static const uint32_t WasmTrapInstructionLength = 2 * sizeof(uint32_t);
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+static constexpr uint32_t WasmCheckedTailEntryOffset = 20u;
+
+// Pointers are 8 bytes, so scaled addressing uses a times-eight scale.
+static const Scale ScalePointer = TimesEight;
+
+class Assembler;
+
+// Size in bytes of one slice of the assembler buffer.
+static constexpr int32_t SliceSize = 1024;
+
+typedef js::jit::AssemblerBufferWithConstantPools<
+    SliceSize, 4, Instruction, Assembler, NumShortBranchRangeTypes>
+    Buffer;
+
+// RISC-V64 assembler. Aggregates the per-extension instruction emitters
+// (I/A/F/D/M/C/Zicsr/Zifencei) over a pool-aware buffer, and implements
+// label binding/patching, relocation recording, and the patchable-code
+// entry points (Toggle*, PatchWrite_*) used by the JIT.
+class Assembler : public AssemblerShared,
+                  public AssemblerRISCVI,
+                  public AssemblerRISCVA,
+                  public AssemblerRISCVF,
+                  public AssemblerRISCVD,
+                  public AssemblerRISCVM,
+                  public AssemblerRISCVC,
+                  public AssemblerRISCVZicsr,
+                  public AssemblerRISCVZifencei {
+  // Registers currently available to UseScratchRegisterScope::Acquire().
+  GeneralRegisterSet scratch_register_list_;
+
+  static constexpr int kInvalidSlotPos = -1;
+
+#ifdef JS_JITSPEW
+  Sprinter* printer;
+#endif
+  bool enoughLabelCache_ = true;
+
+ protected:
+  using LabelOffset = int32_t;
+  // Maps a label offset to its buffer offset.
+  // NOTE(review): "LabelCahe" is a typo for "LabelCache"; renaming requires
+  // touching its uses in the .cpp as well.
+  using LabelCahe =
+      HashMap<LabelOffset, BufferOffset, js::DefaultHasher<LabelOffset>,
+              js::SystemAllocPolicy>;
+  LabelCahe label_cache_;
+  void NoEnoughLabelCache() { enoughLabelCache_ = false; }
+  CompactBufferWriter jumpRelocations_;
+  CompactBufferWriter dataRelocations_;
+  Buffer m_buffer;
+  bool isFinished = false;
+  Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
+
+  struct RelativePatch {
+    // the offset within the code buffer where the value is loaded that
+    // we want to fix-up
+    BufferOffset offset;
+    void* target;
+    RelocationKind kind;
+
+    RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
+        : offset(offset), target(target), kind(kind) {}
+  };
+
+  js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+  // Records a jump to an absolute target; JITCODE-kind jumps additionally go
+  // into the jump-relocation table so the GC can trace the callee.
+  void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
+    enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+    if (kind == RelocationKind::JITCODE) {
+      jumpRelocations_.writeUnsigned(src.getOffset());
+    }
+  }
+
+  // Records an intra-buffer long jump as a JumpImmediate code label.
+  void addLongJump(BufferOffset src, BufferOffset dst) {
+    CodeLabel cl;
+    cl.patchAt()->bind(src.getOffset());
+    cl.target()->bind(dst.getOffset());
+    cl.setLinkMode(CodeLabel::JumpImmediate);
+    addCodeLabel(std::move(cl));
+  }
+
+ public:
+  static bool FLAG_riscv_debug;
+
+  Assembler()
+      : scratch_register_list_((1 << t5.code()) | (1 << t4.code()) |
+                               (1 << t6.code())),
+#ifdef JS_JITSPEW
+        printer(nullptr),
+#endif
+        m_buffer(/*guardSize*/ 2, /*headerSize*/ 2, /*instBufferAlign*/ 8,
+                 /*poolMaxOffset*/ GetPoolMaxOffset(), /*pcBias*/ 8,
+                 /*alignFillInst*/ kNopByte, /*nopFillInst*/ kNopByte),
+        isFinished(false) {
+  }
+  static uint32_t NopFill;
+  static uint32_t AsmPoolMaxOffset;
+  static uint32_t GetPoolMaxOffset();
+  bool reserve(size_t size);
+  bool oom() const;
+  void setPrinter(Sprinter* sp) {
+#ifdef JS_JITSPEW
+    printer = sp;
+#endif
+  }
+  void finish() {
+    MOZ_ASSERT(!isFinished);
+    isFinished = true;
+  }
+  void enterNoPool(size_t maxInst) { m_buffer.enterNoPool(maxInst); }
+  void leaveNoPool() { m_buffer.leaveNoPool(); }
+  bool swapBuffer(wasm::Bytes& bytes);
+  // Size of the instruction stream, in bytes.
+  size_t size() const;
+  // Size of the data table, in bytes.
+  size_t bytesNeeded() const;
+  // Size of the jump relocation table, in bytes.
+  size_t jumpRelocationTableBytes() const;
+  size_t dataRelocationTableBytes() const;
+  void copyJumpRelocationTable(uint8_t* dest);
+  void copyDataRelocationTable(uint8_t* dest);
+  // Copy the assembly code to the given buffer, and perform any pending
+  // relocations relying on the target address.
+  void executableCopy(uint8_t* buffer);
+  // API for speaking with the IonAssemblerBufferWithConstantPools generate an
+  // initial placeholder instruction that we want to later fix up.
+  static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+  static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+  // Patch a short-range branch whose deadline is about to expire so that it
+  // targets a long-jump veneer instead (implemented in the .cpp).
+  static void PatchShortRangeBranchToVeneer(Buffer*, unsigned rangeIdx,
+                                            BufferOffset deadline,
+                                            BufferOffset veneer);
+  struct PoolHeader {
+    uint32_t data;
+
+    struct Header {
+      // The size should take into account the pool header.
+      // The size is in units of Instruction (4bytes), not byte.
+      union {
+        struct {
+          uint32_t size : 15;
+
+          // "Natural" guards are part of the normal instruction stream,
+          // while "non-natural" guards are inserted for the sole purpose
+          // of skipping around a pool.
+          uint32_t isNatural : 1;
+          uint32_t ONES : 16;
+        };
+        uint32_t data;
+      };
+
+      Header(int size_, bool isNatural_)
+          : size(size_), isNatural(isNatural_), ONES(0xffff) {}
+
+      Header(uint32_t data) : data(data) {
+        static_assert(sizeof(Header) == sizeof(uint32_t));
+        MOZ_ASSERT(ONES == 0xffff);
+      }
+
+      uint32_t raw() const {
+        static_assert(sizeof(Header) == sizeof(uint32_t));
+        return data;
+      }
+    };
+
+    PoolHeader(int size_, bool isNatural_)
+        : data(Header(size_, isNatural_).raw()) {}
+
+    uint32_t size() const {
+      Header tmp(data);
+      return tmp.size;
+    }
+
+    uint32_t isNatural() const {
+      Header tmp(data);
+      return tmp.isNatural;
+    }
+  };
+
+  static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+  static void WritePoolGuard(BufferOffset branch, Instruction* inst,
+                             BufferOffset dest);
+  void processCodeLabels(uint8_t* rawCode);
+  BufferOffset nextOffset() { return m_buffer.nextOffset(); }
+  // Get the buffer offset of the next inserted instruction. This may flush
+  // constant pools.
+  BufferOffset nextInstrOffset(int numInstr = 1) {
+    return m_buffer.nextInstrOffset(numInstr);
+  }
+  void comment(const char* msg) { spew("; %s", msg); }
+
+#ifdef JS_JITSPEW
+  inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
+    if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
+      va_list va;
+      va_start(va, fmt);
+      spew(fmt, va);
+      va_end(va);
+    }
+  }
+
+#else
+  MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
+#endif
+
+#ifdef JS_JITSPEW
+  MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
+    // Buffer to hold the formatted string. Note that this may contain
+    // '%' characters, so do not pass it directly to printf functions.
+    char buf[200];
+
+    int i = VsprintfLiteral(buf, fmt, va);
+    if (i > -1) {
+      if (printer) {
+        printer->printf("%s\n", buf);
+      }
+      js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
+    }
+  }
+#endif
+
+  // Integer condition codes, mapped onto the port's low-level condition
+  // values where one exists.
+  enum Condition {
+    Overflow = overflow,
+    Below = Uless,
+    BelowOrEqual = Uless_equal,
+    Above = Ugreater,
+    AboveOrEqual = Ugreater_equal,
+    Equal = equal,
+    NotEqual = not_equal,
+    GreaterThan = greater,
+    GreaterThanOrEqual = greater_equal,
+    LessThan = less,
+    LessThanOrEqual = less_equal,
+    Always = cc_always,
+    CarrySet,
+    CarryClear,
+    Signed,
+    NotSigned,
+    Zero,
+    NonZero,
+  };
+
+  enum DoubleCondition {
+    // These conditions will only evaluate to true if the comparison is ordered
+    // - i.e. neither operand is NaN.
+    DoubleOrdered,
+    DoubleEqual,
+    DoubleNotEqual,
+    DoubleGreaterThan,
+    DoubleGreaterThanOrEqual,
+    DoubleLessThan,
+    DoubleLessThanOrEqual,
+    // If either operand is NaN, these conditions always evaluate to true.
+    DoubleUnordered,
+    DoubleEqualOrUnordered,
+    DoubleNotEqualOrUnordered,
+    DoubleGreaterThanOrUnordered,
+    DoubleGreaterThanOrEqualOrUnordered,
+    DoubleLessThanOrUnordered,
+    DoubleLessThanOrEqualOrUnordered,
+    FIRST_UNORDERED = DoubleUnordered,
+    LAST_UNORDERED = DoubleLessThanOrEqualOrUnordered
+  };
+
+  Register getStackPointer() const { return StackPointer; }
+  void flushBuffer() {}
+  static int disassembleInstr(Instr instr, bool enable_spew = false);
+  int target_at(BufferOffset pos, bool is_internal);
+  static int target_at(Instruction* instruction, BufferOffset pos,
+                       bool is_internal, Instruction* instruction2 = nullptr);
+  uint32_t next_link(Label* label, bool is_internal);
+  static uintptr_t target_address_at(Instruction* pos);
+  static void set_target_value_at(Instruction* pc, uint64_t target);
+  void target_at_put(BufferOffset pos, BufferOffset target_pos,
+                     bool trampoline = false);
+  virtual int32_t branch_offset_helper(Label* L, OffsetSize bits);
+  int32_t branch_long_offset(Label* L);
+
+  // Determines if Label is bound and near enough so that branch instruction
+  // can be used to reach it, instead of jump instruction.
+  bool is_near(Label* L);
+  bool is_near(Label* L, OffsetSize bits);
+  bool is_near_branch(Label* L);
+
+  // Pads with nops until the current offset is aligned to m (a power of two).
+  void nopAlign(int m) {
+    MOZ_ASSERT(m >= 4 && (m & (m - 1)) == 0);
+    while ((currentOffset() & (m - 1)) != 0) {
+      nop();
+    }
+  }
+  // Appends one 32-bit instruction; in debug builds also spews a disassembly.
+  virtual void emit(Instr x) {
+    MOZ_ASSERT(hasCreator());
+    m_buffer.putInt(x);
+#ifdef DEBUG
+    if (!oom()) {
+      DEBUG_PRINTF(
+          "0x%lx(%lx):",
+          (uint64_t)editSrc(BufferOffset(currentOffset() - sizeof(Instr))),
+          currentOffset() - sizeof(Instr));
+      disassembleInstr(x, JitSpewEnabled(JitSpew_Codegen));
+    }
+#endif
+  }
+  virtual void emit(ShortInstr x) { MOZ_CRASH(); }
+  virtual void emit(uint64_t x) { MOZ_CRASH(); }
+  virtual void emit(uint32_t x) {
+    DEBUG_PRINTF(
+        "0x%lx(%lx): uint32_t: %d\n",
+        (uint64_t)editSrc(BufferOffset(currentOffset() - sizeof(Instr))),
+        currentOffset() - sizeof(Instr), x);
+    m_buffer.putInt(x);
+  }
+
+  // Overwrites the instruction at `offset` in place, spewing before/after
+  // disassembly in debug builds.
+  void instr_at_put(BufferOffset offset, Instr instr) {
+    DEBUG_PRINTF("\t[instr_at_put\n");
+    DEBUG_PRINTF("\t%p %d \n\t", editSrc(offset), offset.getOffset());
+    disassembleInstr(editSrc(offset)->InstructionBits());
+    DEBUG_PRINTF("\t");
+    *reinterpret_cast<Instr*>(editSrc(offset)) = instr;
+    disassembleInstr(editSrc(offset)->InstructionBits());
+    DEBUG_PRINTF("\t]\n");
+  }
+
+  static Condition InvertCondition(Condition);
+
+  static DoubleCondition InvertCondition(DoubleCondition);
+
+  static uint64_t ExtractLoad64Value(Instruction* inst0);
+  static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+  static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+                                      ImmPtr expectedValue);
+  static void PatchDataWithValueCheck(CodeLocationLabel label,
+                                      PatchedImmPtr newValue,
+                                      PatchedImmPtr expectedValue);
+  static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+  static void PatchWrite_NearCall(CodeLocationLabel start,
+                                  CodeLocationLabel toCall) {
+    Instruction* inst = (Instruction*)start.raw();
+    uint8_t* dest = toCall.raw();
+
+    // Overwrite whatever instruction used to be here with a call.
+    // Always use long jump for two reasons:
+    // - Jump has to be the same size because of PatchWrite_NearCallSize.
+    // - Return address has to be at the end of replaced block.
+    // Short jump wouldn't be more efficient.
+    // WriteLoad64Instructions will emit 6 instrs to load a addr.
+    Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
+    Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
+                  (ScratchRegister.code() << kRs1Shift) | (0x0 << kImm12Shift);
+    *reinterpret_cast<Instr*>(inst + 6 * kInstrSize) = jalr_;
+  }
+  static void WriteLoad64Instructions(Instruction* inst0, Register reg,
+                                      uint64_t value);
+
+  // 6 address-materialization instructions + 1 jalr (see PatchWrite_NearCall).
+  static uint32_t PatchWrite_NearCallSize() { return 7 * sizeof(uint32_t); }
+
+  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+                                   CompactBufferReader& reader);
+  static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+                                   CompactBufferReader& reader);
+
+  static void ToggleToJmp(CodeLocationLabel inst_);
+  static void ToggleToCmp(CodeLocationLabel inst_);
+  static void ToggleCall(CodeLocationLabel inst_, bool enable);
+
+  static void Bind(uint8_t* rawCode, const CodeLabel& label);
+  // label operations
+  void bind(Label* label, BufferOffset boff = BufferOffset());
+  void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
+  uint32_t currentOffset() { return nextOffset().getOffset(); }
+  void retarget(Label* label, Label* target);
+  static uint32_t NopSize() { return 4; }
+
+  static uintptr_t GetPointer(uint8_t* instPtr) {
+    Instruction* inst = (Instruction*)instPtr;
+    return Assembler::ExtractLoad64Value(inst);
+  }
+
+  static bool HasRoundInstruction(RoundingMode) { return false; }
+
+  void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+                                   const Disassembler::HeapAccess& heapAccess) {
+    MOZ_CRASH();
+  }
+
+  void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
+
+  GeneralRegisterSet* GetScratchRegisterList() {
+    return &scratch_register_list_;
+  }
+
+  void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {}
+
+  // As opposed to x86/x64 version, the data relocation has to be executed
+  // before to recover the pointer, and not after.
+  void writeDataRelocation(ImmGCPtr ptr) {
+    // Raw GC pointer relocations and Value relocations both end up in
+    // TraceOneDataRelocation.
+    if (ptr.value) {
+      if (gc::IsInsideNursery(ptr.value)) {
+        embedsNurseryPointers_ = true;
+      }
+      dataRelocations_.writeUnsigned(nextOffset().getOffset());
+    }
+  }
+
+  bool appendRawCode(const uint8_t* code, size_t numBytes);
+
+  void assertNoGCThings() const {
+#ifdef DEBUG
+    MOZ_ASSERT(dataRelocations_.length() == 0);
+    for (auto& j : jumps_) {
+      MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+    }
+#endif
+  }
+
+  // Assembler Pseudo Instructions (Tables 25.2, 25.3, RISC-V Unprivileged ISA)
+  void break_(uint32_t code, bool break_as_stop = false);
+  void nop();
+  void RV_li(Register rd, intptr_t imm);
+  static int RV_li_count(int64_t imm, bool is_get_temp_reg = false);
+  void GeneralLi(Register rd, int64_t imm);
+  static int GeneralLiCount(intptr_t imm, bool is_get_temp_reg = false);
+  void RecursiveLiImpl(Register rd, intptr_t imm);
+  void RecursiveLi(Register rd, intptr_t imm);
+  static int RecursiveLiCount(intptr_t imm);
+  static int RecursiveLiImplCount(intptr_t imm);
+  // Returns the number of instructions required to load the immediate
+  static int li_estimate(intptr_t imm, bool is_get_temp_reg = false);
+  // Loads an immediate, always using 8 instructions, regardless of the value,
+  // so that it can be modified later.
+  void li_constant(Register rd, intptr_t imm);
+  void li_ptr(Register rd, intptr_t imm);
+};
+
+// Assigns native-ABI locations (integer regs, float regs, or stack slots) to
+// successive call arguments; `next` advances and returns the location for the
+// given MIR type.
+class ABIArgGenerator {
+ public:
+  ABIArgGenerator()
+      : intRegIndex_(0), floatRegIndex_(0), stackOffset_(0), current_() {}
+  ABIArg next(MIRType);
+  ABIArg& current() { return current_; }
+  uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+  void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+
+ protected:
+  unsigned intRegIndex_;    // next unassigned integer argument register
+  unsigned floatRegIndex_;  // next unassigned float argument register
+  uint32_t stackOffset_;    // bytes of stack consumed by spilled arguments
+  ABIArg current_;
+};
+
+// RAII scope that keeps the assembler from emitting a constant/trampoline
+// pool for the next `margin` instructions, re-enabling pools on destruction.
+class BlockTrampolinePoolScope {
+ public:
+  explicit BlockTrampolinePoolScope(Assembler* assem, int margin)
+      : assem_(assem) {
+    assem_->enterNoPool(margin);
+  }
+  ~BlockTrampolinePoolScope() { assem_->leaveNoPool(); }
+
+ private:
+  Assembler* assem_;
+  BlockTrampolinePoolScope() = delete;
+  BlockTrampolinePoolScope(const BlockTrampolinePoolScope&) = delete;
+  BlockTrampolinePoolScope& operator=(const BlockTrampolinePoolScope&) = delete;
+};
+// RAII view over the assembler's scratch-register pool; see the out-of-line
+// definitions for Acquire/hasAvailable. The pool is restored on destruction.
+class UseScratchRegisterScope {
+ public:
+  explicit UseScratchRegisterScope(Assembler* assembler);
+  ~UseScratchRegisterScope();
+
+  Register Acquire();
+  bool hasAvailable() const;
+  // NOTE(review): despite its name, Include computes the intersection with
+  // `list`, i.e. it can only *restrict* the available set (V8's equivalent
+  // adds registers) — confirm intended semantics against callers.
+  void Include(const GeneralRegisterSet& list) {
+    *available_ = GeneralRegisterSet::Intersect(*available_, list);
+  }
+  // Removes every register in `list` from the available set.
+  void Exclude(const GeneralRegisterSet& list) {
+    *available_ = GeneralRegisterSet::Subtract(*available_, list);
+  }
+
+ private:
+  GeneralRegisterSet* available_;     // the assembler's live pool (borrowed)
+  GeneralRegisterSet old_available_;  // snapshot restored by the destructor
+};
+
+// Class Operand represents a shifter operand in data processing instructions.
+// Tagged union over a register, float register, base+offset memory reference,
+// or immediate; only the accessor matching `tag` may be called.
+class Operand {
+ public:
+  enum Tag { REG, FREG, MEM, IMM };
+  Operand(FloatRegister freg) : tag(FREG), rm_(freg.code()) {}
+
+  explicit Operand(Register base, Imm32 off)
+      : tag(MEM), rm_(base.code()), offset_(off.value) {}
+
+  explicit Operand(Register base, int32_t off)
+      : tag(MEM), rm_(base.code()), offset_(off) {}
+
+  explicit Operand(const Address& addr)
+      : tag(MEM), rm_(addr.base.code()), offset_(addr.offset) {}
+
+  explicit Operand(intptr_t immediate) : tag(IMM), rm_() { value_ = immediate; }
+  // Register.
+  Operand(const Register rm) : tag(REG), rm_(rm.code()) {}
+  // Return true if this is a register operand.
+  bool is_reg() const { return tag == REG; }
+  bool is_freg() const { return tag == FREG; }
+  bool is_mem() const { return tag == MEM; }
+  bool is_imm() const { return tag == IMM; }
+  inline intptr_t immediate() const {
+    MOZ_ASSERT(is_imm());
+    return value_;
+  }
+  bool IsImmediate() const { return !is_reg(); }
+  Register rm() const { return Register::FromCode(rm_); }
+  int32_t offset() const {
+    MOZ_ASSERT(is_mem());
+    return offset_;
+  }
+
+  FloatRegister toFReg() const {
+    MOZ_ASSERT(tag == FREG);
+    return FloatRegister::FromCode(rm_);
+  }
+
+  Register toReg() const {
+    MOZ_ASSERT(tag == REG);
+    return Register::FromCode(rm_);
+  }
+
+  Address toAddress() const {
+    MOZ_ASSERT(tag == MEM);
+    return Address(Register::FromCode(rm_), offset());
+  }
+
+ private:
+  Tag tag;
+  uint32_t rm_;      // register code (REG/FREG) or base register (MEM)
+  int32_t offset_;   // valid only when tag == MEM
+  intptr_t value_;  // valid if rm_ == no_reg
+
+  friend class Assembler;
+  friend class MacroAssembler;
+};
+
+// The RISC-V calling convention passes the first 8 integer arguments in
+// a0-a7 and the first 8 float arguments in fa0-fa7.
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = 8;
+// Returns (in *out) the integer argument register for the next argument,
+// or false once all of a0-a7 are taken. Relies on a0..a7 having consecutive
+// register codes.
+static inline bool GetIntArgReg(uint32_t usedIntArgs, Register* out) {
+  if (usedIntArgs < NumIntArgRegs) {
+    *out = Register::FromCode(a0.code() + usedIntArgs);
+    return true;
+  }
+  return false;
+}
+
+// Float-register analogue of GetIntArgReg: yields fa0-fa7 in order, false
+// once they are exhausted.
+static inline bool GetFloatArgReg(uint32_t usedFloatArgs, FloatRegister* out) {
+  if (usedFloatArgs < NumFloatArgRegs) {
+    *out = FloatRegister::FromCode(fa0.code() + usedFloatArgs);
+    return true;
+  }
+  return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+                                       uint32_t usedFloatArgs, Register* out) {
+  // NOTE: We can't properly determine which regs are used if there are
+  // float arguments. If this is needed, we will have to guess.
+  MOZ_ASSERT(usedFloatArgs == 0);
+
+  if (GetIntArgReg(usedIntArgs, out)) {
+    return true;
+  }
+  // Unfortunately, we have to assume things about the point at which
+  // GetIntArgReg returns false, because we need to know how many registers it
+  // can allocate.
+  usedIntArgs -= NumIntArgRegs;
+  if (usedIntArgs >= NumCallTempNonArgRegs) {
+    return false;
+  }
+  *out = CallTempNonArgRegs[usedIntArgs];
+  return true;
+}
+
+} // namespace jit
+} // namespace js
+#endif /* jit_riscv64_Assembler_riscv64_h */
diff --git a/js/src/jit/riscv64/AssemblerMatInt.cpp b/js/src/jit/riscv64/AssemblerMatInt.cpp
new file mode 100644
index 0000000000..81c7fa7c40
--- /dev/null
+++ b/js/src/jit/riscv64/AssemblerMatInt.cpp
@@ -0,0 +1,217 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++
+//-*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM
+// Exceptions. See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Marking.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/disasm/Disasm-riscv64.h"
+#include "vm/Realm.h"
+namespace js {
+namespace jit {
+// Materialize the 64-bit immediate `val` into `rd`. For positive constants
+// whose direct sequence would exceed two instructions, also try a
+// left-normalized variant (shift the value up so its top bit is set,
+// materialize that, then SRLI back down) and pick whichever is shorter.
+void Assembler::RecursiveLi(Register rd, int64_t val) {
+  if (val > 0 && RecursiveLiImplCount(val) > 2) {
+    unsigned LeadingZeros = mozilla::CountLeadingZeroes64((uint64_t)val);
+    uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
+    // +1 accounts for the extra SRLI needed to undo the normalization.
+    int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
+    if (countFillZero < RecursiveLiImplCount(val)) {
+      RecursiveLiImpl(rd, ShiftedVal);
+      srli(rd, rd, LeadingZeros);
+      return;
+    }
+  }
+  RecursiveLiImpl(rd, val);
+}
+
+// Instruction count RecursiveLi would emit for `val`; mirrors RecursiveLi's
+// strategy selection exactly without emitting anything.
+int Assembler::RecursiveLiCount(int64_t val) {
+  if (val > 0 && RecursiveLiImplCount(val) > 2) {
+    unsigned LeadingZeros = mozilla::CountLeadingZeroes64((uint64_t)val);
+    uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
+    // Fill in the bits that will be shifted out with 1s. An example where
+    // this helps is trailing one masks with 32 or more ones. This will
+    // generate ADDI -1 and an SRLI.
+    int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
+    if (countFillZero < RecursiveLiImplCount(val)) {
+      return countFillZero;
+    }
+  }
+  return RecursiveLiImplCount(val);
+}
+
+// Sign-extend the low N bits of V to a full int64_t by left-aligning the
+// field and arithmetic-shifting it back down.
+inline int64_t signExtend(uint64_t V, int N) {
+  return int64_t(V << (64 - N)) >> (64 - N);
+}
+
+// Core of the li materialization, ported from LLVM's RISCVMatInt
+// generateInstSeq: 32-bit values become LUI/ADDI(W); wider values peel the
+// low 12 bits, recurse on the rest, then rebuild with SLLI+ADDI.
+void Assembler::RecursiveLiImpl(Register rd, int64_t Val) {
+  if (is_int32(Val)) {
+    // Depending on the active bits in the immediate Value v, the following
+    // instruction sequences are emitted:
+    //
+    // v == 0 : ADDI
+    // v[0,12) != 0 && v[12,32) == 0 : ADDI
+    // v[0,12) == 0 && v[12,32) != 0 : LUI
+    // v[0,32) != 0 : LUI+ADDI(W)
+    // The +0x800 rounds Hi20 up when Lo12 is negative so the sign-extended
+    // ADDI(W) immediate brings the value back down to Val.
+    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+    int64_t Lo12 = Val << 52 >> 52;
+
+    if (Hi20) {
+      lui(rd, (int32_t)Hi20);
+    }
+
+    if (Lo12 || Hi20 == 0) {
+      if (Hi20) {
+        addiw(rd, rd, Lo12);
+      } else {
+        addi(rd, zero_reg, Lo12);
+      }
+    }
+    return;
+  }
+
+  // In the worst case, for a full 64-bit constant, a sequence of 8
+  // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
+  // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
+  // up to 32 bits while the following ADDI instructions contribute up to 12
+  // bits each.
+  //
+  // On the first glance, implementing this seems to be possible by simply
+  // emitting the most significant 32 bits (LUI+ADDIW) followed by as many
+  // left shift (SLLI) and immediate additions (ADDI) as needed. However, due
+  // to the fact that ADDI performs a sign extended addition, doing it like
+  // that would only be possible when at most 11 bits of the ADDI instructions
+  // are used. Using all 12 bits of the ADDI instructions, like done by GAS,
+  // actually requires that the constant is processed starting with the least
+  // significant bit.
+  //
+  // In the following, constants are processed from LSB to MSB but instruction
+  // emission is performed from MSB to LSB by recursively calling
+  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
+  // from the constant and the optimal shift amount, which can be greater than
+  // 12 bits if the constant is sparse, is determined. Then, the shifted
+  // remaining constant is processed recursively and gets emitted as soon as
+  // it fits into 32 bits. The emission of the shifts and additions is
+  // subsequently performed when the recursion returns.
+
+  int64_t Lo12 = Val << 52 >> 52;
+  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+  int ShiftAmount = 12 + mozilla::CountTrailingZeroes64((uint64_t)Hi52);
+  Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+  // If the remaining bits don't fit in 12 bits, we might be able to reduce
+  // the shift amount in order to use LUI which will zero the lower 12 bits.
+  if (ShiftAmount > 12 && !is_int12(Hi52)) {
+    if (is_int32((uint64_t)Hi52 << 12)) {
+      // Reduce the shift amount and add zeros to the LSBs so it will match
+      // LUI.
+      ShiftAmount -= 12;
+      Hi52 = (uint64_t)Hi52 << 12;
+    }
+  }
+  RecursiveLi(rd, Hi52);
+
+  // NOTE: the LLVM original has an SRLI path guarded by an `Unsigned` flag;
+  // here that flag was always false, so the dead branch has been removed
+  // and the shift is unconditionally an SLLI.
+  slli(rd, rd, ShiftAmount);
+  if (Lo12) {
+    addi(rd, rd, Lo12);
+  }
+}
+
+// Count the instructions RecursiveLiImpl would emit for `Val` without
+// emitting anything. The structure deliberately mirrors RecursiveLiImpl
+// line-for-line so the two cannot drift apart.
+int Assembler::RecursiveLiImplCount(int64_t Val) {
+  int count = 0;
+  if (is_int32(Val)) {
+    // Depending on the active bits in the immediate Value v, the following
+    // instruction sequences are emitted:
+    //
+    // v == 0 : ADDI
+    // v[0,12) != 0 && v[12,32) == 0 : ADDI
+    // v[0,12) == 0 && v[12,32) != 0 : LUI
+    // v[0,32) != 0 : LUI+ADDI(W)
+    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+    int64_t Lo12 = Val << 52 >> 52;
+
+    if (Hi20) {
+      // lui(rd, (int32_t)Hi20);
+      count++;
+    }
+
+    if (Lo12 || Hi20 == 0) {
+      // unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
+      // Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
+      count++;
+    }
+    return count;
+  }
+
+  // In the worst case, for a full 64-bit constant, a sequence of 8
+  // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
+  // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
+  // up to 32 bits while the following ADDI instructions contribute up to 12
+  // bits each.
+  //
+  // On the first glance, implementing this seems to be possible by simply
+  // emitting the most significant 32 bits (LUI+ADDIW) followed by as many
+  // left shift (SLLI) and immediate additions (ADDI) as needed. However, due
+  // to the fact that ADDI performs a sign extended addition, doing it like
+  // that would only be possible when at most 11 bits of the ADDI instructions
+  // are used. Using all 12 bits of the ADDI instructions, like done by GAS,
+  // actually requires that the constant is processed starting with the least
+  // significant bit.
+  //
+  // In the following, constants are processed from LSB to MSB but instruction
+  // emission is performed from MSB to LSB by recursively calling
+  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
+  // from the constant and the optimal shift amount, which can be greater than
+  // 12 bits if the constant is sparse, is determined. Then, the shifted
+  // remaining constant is processed recursively and gets emitted as soon as
+  // it fits into 32 bits. The emission of the shifts and additions is
+  // subsequently performed when the recursion returns.
+
+  int64_t Lo12 = Val << 52 >> 52;
+  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+  int ShiftAmount = 12 + mozilla::CountTrailingZeroes64((uint64_t)Hi52);
+  Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+  // If the remaining bits don't fit in 12 bits, we might be able to reduce
+  // the shift amount in order to use LUI which will zero the lower 12 bits.
+  if (ShiftAmount > 12 && !is_int12(Hi52)) {
+    if (is_int32((uint64_t)Hi52 << 12)) {
+      // Reduce the shift amount and add zeros to the LSBs so it will match
+      // LUI.
+      ShiftAmount -= 12;
+      Hi52 = (uint64_t)Hi52 << 12;
+    }
+  }
+
+  count += RecursiveLiImplCount(Hi52);
+
+  // NOTE: matches RecursiveLiImpl, where the LLVM `Unsigned` SRLI path was
+  // unreachable and has been removed; the shift is always one SLLI.
+  // slli(rd, rd, ShiftAmount);
+  count++;
+  if (Lo12) {
+    // addi(rd, rd, Lo12);
+    count++;
+  }
+  return count;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/CodeGenerator-riscv64.cpp b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
new file mode 100644
index 0000000000..98bb5015cb
--- /dev/null
+++ b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
@@ -0,0 +1,2871 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/CodeGenerator-riscv64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+// shared
+// shared
+CodeGeneratorRiscv64::CodeGeneratorRiscv64(MIRGenerator* gen, LIRGraph* graph,
+                                           MacroAssembler* masm)
+    : CodeGeneratorShared(gen, graph, masm) {}
+
+// Translate an LAllocation into an assembler Operand: GPR, FPR, or a
+// stack/memory address.
+Operand CodeGeneratorRiscv64::ToOperand(const LAllocation& a) {
+  if (a.isGeneralReg()) {
+    return Operand(a.toGeneralReg()->reg());
+  }
+  if (a.isFloatReg()) {
+    return Operand(a.toFloatReg()->reg());
+  }
+  return Operand(ToAddress(a));
+}
+
+Operand CodeGeneratorRiscv64::ToOperand(const LAllocation* a) {
+  return ToOperand(*a);
+}
+
+Operand CodeGeneratorRiscv64::ToOperand(const LDefinition* def) {
+  return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+// On 64-bit (boxed-in-one-register) builds an Int64 allocation is a single
+// Operand; on 32-bit builds it is a Register64 pair.
+Operand CodeGeneratorRiscv64::ToOperandOrRegister64(
+    const LInt64Allocation input) {
+  return ToOperand(input.value());
+}
+#else
+Register64 CodeGeneratorRiscv64::ToOperandOrRegister64(
+    const LInt64Allocation input) {
+  return ToRegister64(input);
+}
+#endif
+
+// Emit a float/double conditional branch to `mir`'s block, dispatching on
+// the operand format.
+void CodeGeneratorRiscv64::branchToBlock(FloatFormat fmt, FloatRegister lhs,
+                                         FloatRegister rhs, MBasicBlock* mir,
+                                         Assembler::DoubleCondition cond) {
+  // Skip past trivial blocks.
+  Label* label = skipTrivialBlocks(mir)->lir()->label();
+  if (fmt == DoubleFloat) {
+    masm.branchDouble(cond, lhs, rhs, label);
+  } else {
+    masm.branchFloat(cond, lhs, rhs, label);
+  }
+}
+
+// OutOfLineCode visitor dispatch for bailouts.
+void OutOfLineBailout::accept(CodeGeneratorRiscv64* codegen) {
+  codegen->visitOutOfLineBailout(this);
+}
+
+// Translate an LAllocation into a MoveOperand for the move resolver.
+MoveOperand CodeGeneratorRiscv64::toMoveOperand(LAllocation a) const {
+  if (a.isGeneralReg()) {
+    return MoveOperand(ToRegister(a));
+  }
+  if (a.isFloatReg()) {
+    return MoveOperand(ToFloatRegister(a));
+  }
+  // A stack area is an address whose *effective address* is the datum;
+  // anything else on the stack is loaded through memory.
+  MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+                                           : MoveOperand::Kind::Memory;
+  Address address = ToAddress(a);
+  MOZ_ASSERT((address.offset & 3) == 0);
+
+  return MoveOperand(address, kind);
+}
+
+// Redirect an unbound failure label to a fresh out-of-line bailout stub
+// that records `snapshot`.
+void CodeGeneratorRiscv64::bailoutFrom(Label* label, LSnapshot* snapshot) {
+  MOZ_ASSERT_IF(!masm.oom(), label->used());
+  MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+  encode(snapshot);
+
+  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+  OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
+  addOutOfLineCode(ool,
+                   new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+  masm.retarget(label, ool->entry());
+}
+
+// Unconditional bailout: emit a jump and route it through bailoutFrom.
+void CodeGeneratorRiscv64::bailout(LSnapshot* snapshot) {
+  Label label;
+  masm.jump(&label);
+  bailoutFrom(&label, snapshot);
+}
+
+// After the shared OOL code, emit the common tail that all bailouts funnel
+// into: record the frame size and jump to the generic bailout handler.
+bool CodeGeneratorRiscv64::generateOutOfLineCode() {
+  if (!CodeGeneratorShared::generateOutOfLineCode()) {
+    return false;
+  }
+
+  if (deoptLabel_.used()) {
+    // All non-table-based bailouts will go here.
+    masm.bind(&deoptLabel_);
+
+    // Push the frame size, so the handler can recover the IonScript.
+    // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
+    // We have to use 'ra' because generateBailoutTable will implicitly do
+    // the same.
+    masm.move32(Imm32(frameSize()), ra);
+
+    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+    masm.jump(handler);
+  }
+
+  return !masm.oom();
+}
+
+// Out-of-line jump table for MTableSwitch: holds the CodeLabel that marks
+// where the table of absolute case addresses is emitted.
+class js::jit::OutOfLineTableSwitch
+    : public OutOfLineCodeBase<CodeGeneratorRiscv64> {
+  MTableSwitch* mir_;
+  CodeLabel jumpLabel_;
+
+  void accept(CodeGeneratorRiscv64* codegen) {
+    codegen->visitOutOfLineTableSwitch(this);
+  }
+
+ public:
+  OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+  MTableSwitch* mir() const { return mir_; }
+
+  CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+// Emit the inline part of a table switch: normalize the index, bounds-check
+// against the default case, then load and jump through the OOL jump table.
+void CodeGeneratorRiscv64::emitTableSwitchDispatch(MTableSwitch* mir,
+                                                   Register index,
+                                                   Register base) {
+  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+  // Lower value with low value
+  if (mir->low() != 0) {
+    masm.subPtr(Imm32(mir->low()), index);
+  }
+
+  // Jump to default case if input is out of range
+  int32_t cases = mir->numCases();
+  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+  // To fill in the CodeLabels for the case entries, we need to first
+  // generate the case entries (we don't yet know their offsets in the
+  // instruction stream).
+  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+  addOutOfLineCode(ool, mir);
+
+  // Compute the position where a pointer to the right case stands.
+  masm.ma_li(base, ool->jumpLabel());
+
+  BaseIndex pointer(base, index, ScalePointer);
+
+  // Jump to the right case
+  masm.branchToComputedAddress(pointer);
+}
+
+// The wasm heap base lives in the pinned HeapReg on this platform.
+void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
+  MOZ_ASSERT(ins->instance()->isBogus());
+  masm.movePtr(HeapReg, ToRegister(ins->output()));
+}
+
+// Shared lowering for the wasm load LIR variants: normalize the pointer to
+// a zero-extended 64-bit value, then defer to MacroAssembler::wasmLoad.
+template <typename T>
+void CodeGeneratorRiscv64::emitWasmLoad(T* lir) {
+  const MWasmLoad* mir = lir->mir();
+  UseScratchRegisterScope temps(&masm);
+  Register scratch2 = temps.Acquire();
+
+  Register ptr = ToRegister(lir->ptr());
+  Register ptrScratch = InvalidReg;
+  if (!lir->ptrCopy()->isBogusTemp()) {
+    ptrScratch = ToRegister(lir->ptrCopy());
+  }
+
+  // 32-bit indices must be zero-extended before the 64-bit address math.
+  if (mir->base()->type() == MIRType::Int32) {
+    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+    ptr = scratch2;
+    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+  }
+
+  // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+  // true 64-bit value.
+  masm.wasmLoad(mir->access(), HeapReg, ptr, ptrScratch,
+                ToAnyRegister(lir->output()));
+}
+
+// Store counterpart of emitWasmLoad; same pointer normalization.
+template <typename T>
+void CodeGeneratorRiscv64::emitWasmStore(T* lir) {
+  const MWasmStore* mir = lir->mir();
+  UseScratchRegisterScope temps(&masm);
+  Register scratch2 = temps.Acquire();
+
+  Register ptr = ToRegister(lir->ptr());
+  Register ptrScratch = InvalidReg;
+  if (!lir->ptrCopy()->isBogusTemp()) {
+    ptrScratch = ToRegister(lir->ptrCopy());
+  }
+
+  if (mir->base()->type() == MIRType::Int32) {
+    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+    ptr = scratch2;
+    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+  }
+
+  // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+  // true 64-bit value.
+  masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg, ptr,
+                 ptrScratch);
+}
+
+// Emit the invalidation epilogue: padding for OsiPoint patching, then the
+// sequence that hands control to the invalidation thunk.
+void CodeGeneratorRiscv64::generateInvalidateEpilogue() {
+  // Ensure that there is enough space in the buffer for the OsiPoint
+  // patching to occur. Otherwise, we could overwrite the invalidation
+  // epilogue
+  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+    masm.nop();
+  }
+
+  masm.bind(&invalidate_);
+
+  // Push the return address of the point that we bailed out at to the stack
+  masm.Push(ra);
+
+  // Push the Ion script onto the stack (when we determine what that
+  // pointer is).
+  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+  // Jump to the invalidator which will replace the current frame.
+  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+
+  masm.jump(thunk);
+}
+
+// Out-of-line bailout stub: push the snapshot offset and jump to the shared
+// deopt tail emitted by generateOutOfLineCode.
+void CodeGeneratorRiscv64::visitOutOfLineBailout(OutOfLineBailout* ool) {
+  // Push snapshotOffset and make sure stack is aligned.
+  masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+  masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
+                Address(StackPointer, 0));
+
+  masm.jump(&deoptLabel_);
+}
+
+// Emit the table-switch jump table: one absolute code pointer per case,
+// aligned and kept free of trampoline-pool insertions.
+void CodeGeneratorRiscv64::visitOutOfLineTableSwitch(
+    OutOfLineTableSwitch* ool) {
+  MTableSwitch* mir = ool->mir();
+  masm.nop();
+  masm.haltingAlign(sizeof(void*));
+  masm.bind(ool->jumpLabel());
+  masm.addCodeLabel(*ool->jumpLabel());
+  // A pool emitted inside the table would corrupt the entry offsets.
+  BlockTrampolinePoolScope block_trampoline_pool(
+      &masm, mir->numCases() * sizeof(uint64_t));
+  for (size_t i = 0; i < mir->numCases(); i++) {
+    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+    Label* caseheader = caseblock->label();
+    uint32_t caseoffset = caseheader->offset();
+
+    // The entries of the jump table need to be absolute addresses and thus
+    // must be patched after codegen is finished.
+    CodeLabel cl;
+    masm.writeCodePointer(&cl);
+    cl.target()->bind(caseoffset);
+    masm.addCodeLabel(cl);
+  }
+}
+
+// Dispatch the out-of-line wasm float->int truncation check to the
+// MacroAssembler helper matching the (from, to) type pair.
+void CodeGeneratorRiscv64::visitOutOfLineWasmTruncateCheck(
+    OutOfLineWasmTruncateCheck* ool) {
+  FloatRegister input = ool->input();
+  Register output = ool->output();
+  Register64 output64 = ool->output64();
+  MIRType fromType = ool->fromType();
+  MIRType toType = ool->toType();
+  Label* oolRejoin = ool->rejoin();
+  TruncFlags flags = ool->flags();
+  wasm::BytecodeOffset off = ool->bytecodeOffset();
+
+  if (fromType == MIRType::Float32) {
+    if (toType == MIRType::Int32) {
+      masm.oolWasmTruncateCheckF32ToI32(input, output, flags, off, oolRejoin);
+    } else if (toType == MIRType::Int64) {
+      masm.oolWasmTruncateCheckF32ToI64(input, output64, flags, off, oolRejoin);
+    } else {
+      MOZ_CRASH("unexpected type");
+    }
+  } else if (fromType == MIRType::Double) {
+    if (toType == MIRType::Int32) {
+      masm.oolWasmTruncateCheckF64ToI32(input, output, flags, off, oolRejoin);
+    } else if (toType == MIRType::Int64) {
+      masm.oolWasmTruncateCheckF64ToI64(input, output64, flags, off, oolRejoin);
+    } else {
+      MOZ_CRASH("unexpected type");
+    }
+  } else {
+    MOZ_CRASH("unexpected type");
+  }
+}
+
+// On punbox64 a Value occupies a single register; wrap the operand/temp at
+// `pos` as a ValueOperand.
+ValueOperand CodeGeneratorRiscv64::ToValue(LInstruction* ins, size_t pos) {
+  return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorRiscv64::ToTempValue(LInstruction* ins, size_t pos) {
+  return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+// Box a typed payload into a tagged Value register.
+void CodeGenerator::visitBox(LBox* box) {
+  const LAllocation* in = box->getOperand(0);
+  ValueOperand result = ToOutValue(box);
+
+  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+}
+
+// Unbox a Value into a typed register. Three cases: fallible unbox (type
+// check + bailout), infallible unbox from a register, and infallible unbox
+// from a stack slot.
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+  MUnbox* mir = unbox->mir();
+
+  Register result = ToRegister(unbox->output());
+
+  if (mir->fallible()) {
+    const ValueOperand value = ToValue(unbox, LUnbox::Input);
+    Label bail;
+    switch (mir->type()) {
+      case MIRType::Int32:
+        masm.fallibleUnboxInt32(value, result, &bail);
+        break;
+      case MIRType::Boolean:
+        masm.fallibleUnboxBoolean(value, result, &bail);
+        break;
+      case MIRType::Object:
+        masm.fallibleUnboxObject(value, result, &bail);
+        break;
+      case MIRType::String:
+        masm.fallibleUnboxString(value, result, &bail);
+        break;
+      case MIRType::Symbol:
+        masm.fallibleUnboxSymbol(value, result, &bail);
+        break;
+      case MIRType::BigInt:
+        masm.fallibleUnboxBigInt(value, result, &bail);
+        break;
+      default:
+        MOZ_CRASH("Given MIRType cannot be unboxed.");
+    }
+    bailoutFrom(&bail, unbox->snapshot());
+    return;
+  }
+
+  // Infallible: the input's type is statically known.
+  LAllocation* input = unbox->getOperand(LUnbox::Input);
+  if (input->isRegister()) {
+    Register inputReg = ToRegister(input);
+    switch (mir->type()) {
+      case MIRType::Int32:
+        masm.unboxInt32(inputReg, result);
+        break;
+      case MIRType::Boolean:
+        masm.unboxBoolean(inputReg, result);
+        break;
+      case MIRType::Object:
+        masm.unboxObject(inputReg, result);
+        break;
+      case MIRType::String:
+        masm.unboxString(inputReg, result);
+        break;
+      case MIRType::Symbol:
+        masm.unboxSymbol(inputReg, result);
+        break;
+      case MIRType::BigInt:
+        masm.unboxBigInt(inputReg, result);
+        break;
+      default:
+        MOZ_CRASH("Given MIRType cannot be unboxed.");
+    }
+    return;
+  }
+
+  Address inputAddr = ToAddress(input);
+  switch (mir->type()) {
+    case MIRType::Int32:
+      masm.unboxInt32(inputAddr, result);
+      break;
+    case MIRType::Boolean:
+      masm.unboxBoolean(inputAddr, result);
+      break;
+    case MIRType::Object:
+      masm.unboxObject(inputAddr, result);
+      break;
+    case MIRType::String:
+      masm.unboxString(inputAddr, result);
+      break;
+    case MIRType::Symbol:
+      masm.unboxSymbol(inputAddr, result);
+      break;
+    case MIRType::BigInt:
+      masm.unboxBigInt(inputAddr, result);
+      break;
+    default:
+      MOZ_CRASH("Given MIRType cannot be unboxed.");
+  }
+}
+
+// Extract the tag of a boxed Value for type-switching.
+void CodeGeneratorRiscv64::splitTagForTest(const ValueOperand& value,
+                                           ScratchTagScope& tag) {
+  masm.splitTag(value.valueReg(), tag);
+}
+
+// Materialize a 64-bit comparison result (0/1) into the output register.
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+  MCompare* mir = lir->mir();
+  const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+  MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+             type == MCompare::Compare_UInt64);
+
+  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+  Register lhsReg = ToRegister64(lhs).reg;
+  Register output = ToRegister(lir->output());
+  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+  Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+  // Rhs may be a constant, a register, or a stack slot.
+  if (IsConstant(rhs)) {
+    masm.cmpPtrSet(cond, lhsReg, ImmWord(ToInt64(rhs)), output);
+  } else if (rhs.value().isGeneralReg()) {
+    masm.cmpPtrSet(cond, lhsReg, ToRegister64(rhs).reg, output);
+  } else {
+    masm.cmpPtrSet(cond, lhsReg, ToAddress(rhs.value()), output);
+  }
+}
+
+// Fused 64-bit compare-and-branch.
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+  MCompare* mir = lir->cmpMir();
+  const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+  MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+             type == MCompare::Compare_UInt64);
+
+  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+  Register lhsReg = ToRegister64(lhs).reg;
+  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+  Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+  if (IsConstant(rhs)) {
+    emitBranch(lhsReg, ImmWord(ToInt64(rhs)), cond, lir->ifTrue(),
+               lir->ifFalse());
+  } else if (rhs.value().isGeneralReg()) {
+    emitBranch(lhsReg, ToRegister64(rhs).reg, cond, lir->ifTrue(),
+               lir->ifFalse());
+  } else {
+    emitBranch(lhsReg, ToAddress(rhs.value()), cond, lir->ifTrue(),
+               lir->ifFalse());
+  }
+}
+
+// Materialize a 32-bit or pointer-width comparison result (0/1).
+void CodeGenerator::visitCompare(LCompare* comp) {
+  MCompare* mir = comp->mir();
+  Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+  const LAllocation* left = comp->getOperand(0);
+  const LAllocation* right = comp->getOperand(1);
+  const LDefinition* def = comp->getDef(0);
+
+  // Pointer-width compares (objects, symbols, uintptr, wasm refs).
+  if (mir->compareType() == MCompare::Compare_Object ||
+      mir->compareType() == MCompare::Compare_Symbol ||
+      mir->compareType() == MCompare::Compare_UIntPtr ||
+      mir->compareType() == MCompare::Compare_RefOrNull) {
+    if (right->isConstant()) {
+      MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+      masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
+                     ToRegister(def));
+    } else if (right->isGeneralReg()) {
+      masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
+                     ToRegister(def));
+    } else {
+      masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+    }
+    return;
+  }
+
+  // 32-bit integer compares.
+  if (right->isConstant()) {
+    masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
+                  ToRegister(def));
+  } else if (right->isGeneralReg()) {
+    masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+  } else {
+    masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+  }
+}
+
+// Fused compare-and-branch for 32-bit and pointer-width comparisons.
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+  const MCompare* mir = comp->cmpMir();
+  const MCompare::CompareType type = mir->compareType();
+  const LAllocation* lhs = comp->left();
+  const LAllocation* rhs = comp->right();
+  MBasicBlock* ifTrue = comp->ifTrue();
+  MBasicBlock* ifFalse = comp->ifFalse();
+  Register lhsReg = ToRegister(lhs);
+  const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
+
+  // Pointer-width compares; a memory rhs is not expected here.
+  if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
+      type == MCompare::Compare_UIntPtr ||
+      type == MCompare::Compare_RefOrNull) {
+    if (rhs->isConstant()) {
+      emitBranch(ToRegister(lhs), Imm32(ToInt32(rhs)), cond, ifTrue, ifFalse);
+    } else if (rhs->isGeneralReg()) {
+      emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+    } else {
+      MOZ_CRASH("NYI");
+    }
+    return;
+  }
+
+  if (rhs->isConstant()) {
+    emitBranch(lhsReg, Imm32(ToInt32(comp->right())), cond, ifTrue, ifFalse);
+  } else if (comp->right()->isGeneralReg()) {
+    emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+  } else {
+    // TODO(riscv64): emitBranch with a direct 32-bit memory comparison
+    // instead of loading into a scratch register first. (Comment inherited
+    // from the loong64 port.)
+    ScratchRegisterScope scratch(masm);
+    masm.load32(ToAddress(rhs), scratch);
+    emitBranch(lhsReg, Register(scratch), cond, ifTrue, ifFalse);
+  }
+}
+
+// Signed 64-bit wasm division/modulus with explicit divide-by-zero and
+// INT64_MIN / -1 overflow handling.
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+  Register lhs = ToRegister(lir->lhs());
+  Register rhs = ToRegister(lir->rhs());
+  Register output = ToRegister(lir->output());
+
+  Label done;
+
+  // Handle divide by zero.
+  if (lir->canBeDivideByZero()) {
+    Label nonZero;
+    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+    masm.bind(&nonZero);
+  }
+
+  // Handle an integer overflow exception from INT64_MIN / -1.
+  if (lir->canBeNegativeOverflow()) {
+    Label notOverflow;
+    masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
+    masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
+    if (lir->mir()->isMod()) {
+      // Per wasm semantics INT64_MIN % -1 is 0, not a trap.
+      masm.ma_xor(output, output, Operand(output));
+    } else {
+      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+    }
+    masm.jump(&done);
+    masm.bind(&notOverflow);
+  }
+
+  if (lir->mir()->isMod()) {
+    masm.ma_mod64(output, lhs, rhs);
+  } else {
+    masm.ma_div64(output, lhs, rhs);
+  }
+
+  masm.bind(&done);
+}
+
+// Unsigned 64-bit wasm division/modulus. Unlike the signed variant there is
+// no overflow case (no INT64_MIN / -1 hazard), so only divide-by-zero is
+// checked; the unused `done` label of the original has been removed.
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+  Register lhs = ToRegister(lir->lhs());
+  Register rhs = ToRegister(lir->rhs());
+  Register output = ToRegister(lir->output());
+
+  // Prevent divide by zero.
+  if (lir->canBeDivideByZero()) {
+    Label nonZero;
+    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+    masm.bind(&nonZero);
+  }
+
+  if (lir->mir()->isMod()) {
+    masm.ma_modu64(output, lhs, rhs);
+  } else {
+    masm.ma_divu64(output, lhs, rhs);
+  }
+}
+
+// Divide two BigInt digits and allocate a new BigInt for the quotient.
+void CodeGeneratorRiscv64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+                                         Register divisor, Register output,
+                                         Label* fail) {
+  // Callers handle division by zero and integer overflow.
+  masm.ma_div64(/* result= */ dividend, dividend, divisor);
+
+  // Create and return the result.
+  masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+  masm.initializeBigInt(output, dividend);
+}
+
+// Remainder counterpart of emitBigIntDiv.
+void CodeGeneratorRiscv64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+                                         Register divisor, Register output,
+                                         Label* fail) {
+  // Callers handle division by zero and integer overflow.
+  masm.ma_mod64(/* result= */ dividend, dividend, divisor);
+
+  // Create and return the result.
+  masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+  masm.initializeBigInt(output, dividend);
+}
+
+// 64-bit wasm loads/stores take the full-width pointer directly; no
+// zero-extension dance is needed (cf. emitWasmLoad/emitWasmStore).
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+  const MWasmLoad* mir = lir->mir();
+
+  Register ptrScratch = InvalidReg;
+  if (!lir->ptrCopy()->isBogusTemp()) {
+    ptrScratch = ToRegister(lir->ptrCopy());
+  }
+
+  masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+                   ToOutRegister64(lir));
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+  const MWasmStore* mir = lir->mir();
+
+  Register ptrScratch = InvalidReg;
+  if (!lir->ptrCopy()->isBogusTemp()) {
+    ptrScratch = ToRegister(lir->ptrCopy());
+  }
+
+  masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+                    ToRegister(lir->ptr()), ptrScratch);
+}
+
+// wasm select on i64: output already holds the true value; overwrite with
+// the false value when the condition is zero.
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+  Register cond = ToRegister(lir->condExpr());
+  const LInt64Allocation falseExpr = lir->falseExpr();
+
+  Register64 out = ToOutRegister64(lir);
+  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+             "true expr is reused for input");
+
+  if (falseExpr.value().isRegister()) {
+    masm.moveIfZero(out.reg, ToRegister(falseExpr.value()), cond);
+  } else {
+    // Memory operand: skip the load when the condition is non-zero.
+    Label done;
+    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+    masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
+    masm.bind(&done);
+  }
+}
+
+// Bitwise i64 -> f64 reinterpretation (fmv.d.x moves raw bits).
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+  masm.fmv_d_x(ToFloatRegister(lir->output()), ToRegister(lir->input()));
+}
+
+// Bitwise f64 -> i64 reinterpretation.
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+  masm.fmv_x_d(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+  const LAllocation* input = lir->getOperand(0);
+  Register output = ToRegister(lir->output());
+
+  if (lir->mir()->isUnsigned()) {
+    masm.move32To64ZeroExtend(ToRegister(input), Register64(output));
+  } else {
+    // slliw with shamt 0 writes the sign-extended low 32 bits (RV64 W-form
+    // instructions always sign-extend their 32-bit result).
+    masm.slliw(output, ToRegister(input), 0);
+  }
+}
+
+// Truncate i64 to i32 (bottom half); the result is kept sign-extended in
+// the 64-bit register as the platform's canonical int32 form.
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+  const LAllocation* input = lir->getOperand(0);
+  Register output = ToRegister(lir->output());
+
+  if (lir->mir()->bottomHalf()) {
+    if (input->isMemory()) {
+      masm.load32(ToAddress(input), output);
+    } else {
+      // slliw rd, rs, 0 sign-extends the low 32 bits (W-form semantics).
+      masm.slliw(output, ToRegister(input), 0);
+    }
+  } else {
+    MOZ_CRASH("Not implemented.");
+  }
+}
+
+// Sign-extend a sub-word of an i64 back to a full i64.
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+  Register64 input = ToRegister64(lir->getInt64Operand(0));
+  Register64 output = ToOutRegister64(lir);
+  switch (lir->mode()) {
+    case MSignExtendInt64::Byte:
+      masm.move32To64SignExtend(input.reg, output);
+      masm.move8SignExtend(output.reg, output.reg);
+      break;
+    case MSignExtendInt64::Half:
+      masm.move32To64SignExtend(input.reg, output);
+      masm.move16SignExtend(output.reg, output.reg);
+      break;
+    case MSignExtendInt64::Word:
+      masm.move32To64SignExtend(input.reg, output);
+      break;
+  }
+}
+
+// Zero-extend a u32 wasm index in place (input and output alias).
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+  Register input = ToRegister(lir->input());
+  Register output = ToRegister(lir->output());
+  MOZ_ASSERT(input == output);
+  masm.move32To64ZeroExtend(input, Register64(output));
+}
+
+// Wrap a 64-bit index back to 32 bits in place.
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+  Register input = ToRegister(lir->input());
+  Register output = ToRegister(lir->output());
+  MOZ_ASSERT(input == output);
+  masm.move64To32(Register64(input), output);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+  Register64 input = ToRegister64(lir->getInt64Operand(0));
+  Register64 output = ToOutRegister64(lir);
+  masm.clz64(input, output.reg);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+  Register64 input = ToRegister64(lir->getInt64Operand(0));
+  Register64 output = ToOutRegister64(lir);
+  masm.ctz64(input, output.reg);
+}
+
+// i64 logical not: output = (input == 0) ? 1 : 0.
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+  Register64 input = ToRegister64(lir->getInt64Operand(0));
+  Register output = ToRegister(lir->output());
+
+  masm.ma_cmp_set(output, input.reg, zero, Assembler::Equal);
+}
+
// Truncate a double/float32 to a (signed or unsigned) 64-bit integer for
// wasm. Out-of-range/NaN inputs branch to the out-of-line check, which
// either traps or produces the saturated result, then rejoins.
void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  MWasmTruncateToInt64* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  Label* oolRejoin = ool->rejoin();
  bool isSaturating = mir->isSaturating();

  // InvalidFloatReg: no temporary float register is needed on this target.
  if (fromType == MIRType::Double) {
    if (mir->isUnsigned()) {
      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
                                     oolRejoin, InvalidFloatReg);
    }
  } else {
    if (mir->isUnsigned()) {
      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
                                       oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    }
  }
}
+
// Convert a 64-bit integer (signed or unsigned) to double or float32.
// Register::Invalid(): the unsigned conversions need no temp register here.
void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  FloatRegister output = ToFloatRegister(lir->output());

  MIRType outputType = lir->mir()->type();
  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);

  if (outputType == MIRType::Double) {
    if (lir->mir()->isUnsigned()) {
      masm.convertUInt64ToDouble(input, output, Register::Invalid());
    } else {
      masm.convertInt64ToDouble(input, output);
    }
  } else {
    if (lir->mir()->isUnsigned()) {
      masm.convertUInt64ToFloat32(input, output, Register::Invalid());
    } else {
      masm.convertInt64ToFloat32(input, output);
    }
  }
}
+
// Branch on a 64-bit value: go to ifTrue when the register is non-zero,
// otherwise to ifFalse.
void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
}

// Branch on a 32-bit value: go to ifTrue when the register is non-zero,
// otherwise to ifFalse.
void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
  const LAllocation* opd = test->getOperand(0);
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
}
+
// Double min/max. The first operand doubles as the output register.
// NOTE(review): the trailing 'true' presumably enables NaN-handling
// semantics in min/maxDouble — confirm against the MacroAssembler API.
void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  FloatRegister first = ToFloatRegister(ins->first());
  FloatRegister second = ToFloatRegister(ins->second());

  MOZ_ASSERT(first == ToFloatRegister(ins->output()));

  if (ins->mir()->isMax()) {
    masm.maxDouble(second, first, true);
  } else {
    masm.minDouble(second, first, true);
  }
}

// Float32 min/max. The first operand doubles as the output register.
void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  FloatRegister first = ToFloatRegister(ins->first());
  FloatRegister second = ToFloatRegister(ins->second());

  MOZ_ASSERT(first == ToFloatRegister(ins->output()));

  if (ins->mir()->isMax()) {
    masm.maxFloat32(second, first, true);
  } else {
    masm.minFloat32(second, first, true);
  }
}
+
// 32-bit integer addition. When the LIR node carries a snapshot, signed
// overflow must bail out to Baseline; otherwise a plain add suffices.
void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_add32(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.addw(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
+
// 64-bit addition; lhs is reused as the output (asserted below), so the
// add is performed in place. Overflow wraps (no bailout for int64).
void CodeGenerator::visitAddI64(LAddI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    return;
  }

  masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}
+
// 32-bit integer subtraction. When the LIR node carries a snapshot, signed
// overflow must bail out to Baseline; otherwise a plain subtract suffices.
void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow

  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_sub32(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.ma_sub32(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
+
// 64-bit subtraction; lhs is reused as the output (asserted below), so the
// subtract is performed in place. Overflow wraps (no bailout for int64).
void CodeGenerator::visitSubI64(LSubI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    return;
  }

  masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}
+
// 32-bit integer multiplication with strength reduction for constant rhs.
// Depending on MMul flags, this may bail out on overflow or on a -0 result
// (which cannot be represented as an int32).
void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());
  MMul* mul = ins->mir();

  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    int32_t constant = ToInt32(rhs);
    Register src = ToRegister(lhs);

    // Bailout on -0.0
    // constant == 0: result is -0 when src < 0; constant < 0: result is
    // -0 when src == 0. Pick the matching comparison.
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition cond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
    }

    switch (constant) {
      case -1:
        // -INT32_MIN overflows int32; bail out first if that can happen.
        if (mul->canOverflow()) {
          bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
                       ins->snapshot());
        }

        masm.ma_sub32(dest, zero, src);
        break;
      case 0:
        masm.move32(zero, dest);
        break;
      case 1:
        masm.move32(src, dest);
        break;
      case 2:
        // x * 2 == x + x, with an overflow check if needed.
        if (mul->canOverflow()) {
          Label mulTwoOverflow;
          masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);

          bailoutFrom(&mulTwoOverflow, ins->snapshot());
        } else {
          masm.addw(dest, src, src);
        }
        break;
      default:
        uint32_t shift = FloorLog2(constant);

        if (!mul->canOverflow() && (constant > 0)) {
          // If it cannot overflow, we can do lots of optimizations.
          uint32_t rest = constant - (1 << shift);

          // See if the constant has one bit set, meaning it can be
          // encoded as a bitshift.
          if ((1 << shift) == constant) {
            masm.slliw(dest, src, shift % 32);
            return;
          }

          // If the constant cannot be encoded as (1<<C1), see if it can
          // be encoded as (1<<C1) | (1<<C2), which can be computed
          // using an add and a shift.
          uint32_t shift_rest = FloorLog2(rest);
          if (src != dest && (1u << shift_rest) == rest) {
            masm.slliw(dest, src, (shift - shift_rest) % 32);
            masm.add32(src, dest);
            if (shift_rest != 0) {
              masm.slliw(dest, dest, shift_rest % 32);
            }
            return;
          }
        }

        if (mul->canOverflow() && (constant > 0) && (src != dest)) {
          // To stay on the safe side, only optimize things that are a
          // power of 2.

          if ((1 << shift) == constant) {
            ScratchRegisterScope scratch(masm);
            // dest = lhs * pow(2, shift)
            masm.slliw(dest, src, shift % 32);
            // At runtime, check (lhs == dest >> shift), if this does
            // not hold, some bits were lost due to overflow, and the
            // computation should be resumed as a double.
            masm.sraiw(scratch, dest, shift % 32);
            bailoutCmp32(Assembler::NotEqual, src, Register(scratch),
                         ins->snapshot());
            return;
          }
        }

        // General constant case: real multiply, with or without an
        // overflow check.
        if (mul->canOverflow()) {
          Label mulConstOverflow;
          masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
                                    &mulConstOverflow);

          bailoutFrom(&mulConstOverflow, ins->snapshot());
        } else {
          masm.ma_mul32(dest, src, Imm32(ToInt32(rhs)));
        }
        break;
    }
  } else {
    Label multRegOverflow;

    if (mul->canOverflow()) {
      masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
                                &multRegOverflow);
      bailoutFrom(&multRegOverflow, ins->snapshot());
    } else {
      masm.mulw(dest, ToRegister(lhs), ToRegister(rhs));
    }

    if (mul->canBeNegativeZero()) {
      Label done;
      masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

      // Result is -0 if lhs or rhs is negative.
      // In that case result must be double value so bailout
      UseScratchRegisterScope temps(&masm);
      Register scratch = temps.Acquire();
      masm.or_(scratch, ToRegister(lhs), ToRegister(rhs));
      bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());

      masm.bind(&done);
    }
  }
}
+
// 64-bit multiplication with strength reduction for constant rhs:
// -1/0/1 special cases, (2^k - 1) and (2^k + 1) via shift-and-add/sub,
// exact powers of two via shift, and a real mul64 otherwise.
void CodeGenerator::visitMulI64(LMulI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
  const Register64 output = ToOutRegister64(lir);

  if (IsConstant(rhs)) {
    int64_t constant = ToInt64(rhs);
    switch (constant) {
      case -1:
        masm.neg64(ToRegister64(lhs));
        return;
      case 0:
        masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
        return;
      case 1:
        // nop
        return;
      default:
        if (constant > 0) {
          // x * (2^k - 1) == (x << k) - x
          if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1))) {
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant + 1)), output);
            masm.sub64(ToRegister64(lhs), output);
            return;
          } else if (mozilla::IsPowerOfTwo(
                         static_cast<uint32_t>(constant - 1))) {
            // x * (2^k + 1) == (x << k) + x
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant - 1u)), output);
            masm.add64(ToRegister64(lhs), output);
            return;
          }
          // Use shift if constant is power of 2.
          int32_t shift = mozilla::FloorLog2(constant);
          if (int64_t(1) << shift == constant) {
            masm.lshift64(Imm32(shift), ToRegister64(lhs));
            return;
          }
        }
        Register temp = ToTempRegisterOrInvalid(lir->temp());
        masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
    }
  } else {
    Register temp = ToTempRegisterOrInvalid(lir->temp());
    masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
  }
}
+
// 32-bit signed division. Emits guards, in order, for: division by zero
// (trap / truncate-to-0 / bailout), INT32_MIN / -1 overflow (trap /
// truncate / bailout), and the -0 result case (bailout), before the
// actual divide.
void CodeGenerator::visitDivI(LDivI* ins) {
  // Extract the registers from this instruction
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register temp = ToRegister(ins->getTemp(0));
  MDiv* mir = ins->mir();

  Label done;

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      // Wasm semantics: divide-by-zero traps.
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero (Infinity|0 == 0)
      Label notzero;
      masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_branch(&done, ShortJump);
      masm.bind(&notzero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Handle an integer overflow exception from -2147483648 / -1.
  if (mir->canBeNegativeOverflow()) {
    Label notMinInt;
    masm.move32(Imm32(INT32_MIN), temp);
    masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);

    masm.move32(Imm32(-1), temp);
    if (mir->trapOnError()) {
      Label ok;
      masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
      masm.bind(&ok);
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN
      Label skip;
      masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(INT32_MIN), dest);
      masm.ma_branch(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
    }
    masm.bind(&notMinInt);
  }

  // Handle negative 0. (0/-Y)
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonzero;
    masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
    masm.bind(&nonzero);
  }
  // Note: above safety checks could not be verified as Ion seems to be
  // smarter and requires double arithmetic in such cases.

  // All regular. Lets call div.
  if (mir->canTruncateRemainder()) {
    masm.ma_div32(dest, lhs, rhs);
  } else {
    // A non-zero remainder means the true result is not an int32: bail.
    MOZ_ASSERT(mir->fallible());

    Label remainderNonZero;
    masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
    bailoutFrom(&remainderNonZero, ins->snapshot());
  }

  masm.bind(&done);
}
+
// Signed division by a constant power of two, implemented with shifts.
// Non-truncated divisions bail out when the remainder is non-zero (the
// true result would be a double).
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  Register lhs = ToRegister(ins->numerator());
  Register dest = ToRegister(ins->output());
  Register tmp = ToRegister(ins->getTemp(0));
  int32_t shift = ins->shift();

  if (shift != 0) {
    MDiv* mir = ins->mir();
    if (!mir->isTruncated()) {
      // If the remainder is going to be != 0, bailout since this must
      // be a double. Shifting left by (32 - shift) isolates the low
      // 'shift' bits; any set bit there is a non-zero remainder.
      masm.slliw(tmp, lhs, (32 - shift) % 32);
      bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
    }

    if (!mir->canBeNegativeDividend()) {
      // Numerator is unsigned, so needs no adjusting. Do the shift.
      masm.sraiw(dest, lhs, shift % 32);
      return;
    }

    // Adjust the value so that shifting produces a correctly rounded result
    // when the numerator is negative. See 10-1 "Signed Division by a Known
    // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
    if (shift > 1) {
      masm.sraiw(tmp, lhs, 31);
      masm.srliw(tmp, tmp, (32 - shift) % 32);
      masm.add32(lhs, tmp);
    } else {
      masm.srliw(tmp, lhs, (32 - shift) % 32);
      masm.add32(lhs, tmp);
    }

    // Do the shift.
    masm.sraiw(dest, tmp, shift % 32);
  } else {
    // Division by 1: just copy the numerator.
    masm.move32(lhs, dest);
  }
}
+
// 32-bit signed modulus. Guards, in order: INT_MIN % -1, division by zero
// (trap / truncate-to-0 / bailout), 0 % negative, then the actual mod,
// and finally a -0 check on the result (dividend kept in callTemp).
void CodeGenerator::visitModI(LModI* ins) {
  // Extract the registers from this instruction
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register callTemp = ToRegister(ins->callTemp());
  MMod* mir = ins->mir();
  Label done, prevent;

  // Preserve the dividend for the -0 check after the mod clobbers lhs/dest.
  masm.move32(lhs, callTemp);

  // Prevent INT_MIN % -1;
  // The integer division will give INT_MIN, but we want -(double)INT_MIN.
  if (mir->canBeNegativeDividend()) {
    masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
    if (mir->isTruncated()) {
      // (INT_MIN % -1)|0 == 0
      Label skip;
      masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_branch(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
    }
    masm.bind(&prevent);
  }

  // 0/X (with X < 0) is bad because both of these values *should* be
  // doubles, and the result should be -0.0, which cannot be represented in
  // integers. X/0 is bad because it will give garbage (or abort), when it
  // should give either \infty, -\infty or NAN.

  // Prevent 0 / X (with X < 0) and X / 0
  // testing X / Y. Compare Y with 0.
  // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
  // If (Y < 0), then we compare X with 0, and bail if X == 0
  // If (Y == 0), then we simply want to bail.
  // if (Y > 0), we don't bail.

  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        // Wasm semantics: divide-by-zero traps.
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        Label skip;
        masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
        masm.move32(Imm32(0), dest);
        masm.ma_branch(&done, ShortJump);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  if (mir->canBeNegativeDividend()) {
    Label notNegative;
    masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
    if (mir->isTruncated()) {
      // NaN|0 == 0 and (0 % -X)|0 == 0
      Label skip;
      masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_branch(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
    }
    masm.bind(&notNegative);
  }

  masm.ma_mod32(dest, lhs, rhs);

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0
      masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
      bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
    }
  }
  masm.bind(&done);
}
+
// Modulus by a constant power of two via bit masking. Negative dividends
// are negated around the mask so the result keeps the dividend's sign;
// a zero result from a negative dividend is really -0 and bails out.
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register in = ToRegister(ins->getOperand(0));
  Register out = ToRegister(ins->getDef(0));
  MMod* mir = ins->mir();
  Label negative, done;

  masm.move32(in, out);
  // 0 % 2^k == 0: nothing to do.
  masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
  // Switch based on sign of the lhs.
  // Positive numbers are just a bitmask
  masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
  {
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.ma_branch(&done, ShortJump);
  }

  // Negative numbers need a negate, bitmask, negate
  {
    masm.bind(&negative);
    masm.neg32(out);
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.neg32(out);
  }
  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      // Result 0 here came from the negative path, i.e. -0: bail.
      bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
  masm.bind(&done);
}
+
// Modulus via the masked-shift helper ma_mod_mask. When the operation is
// fallible (non-truncated, dividend may be negative), the helper's bail
// label feeds a bailout; otherwise no bail path is wired up.
void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  Register src = ToRegister(ins->getOperand(0));
  Register dest = ToRegister(ins->getDef(0));
  Register tmp0 = ToRegister(ins->getTemp(0));
  Register tmp1 = ToRegister(ins->getTemp(1));
  MMod* mir = ins->mir();

  if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
    MOZ_ASSERT(mir->fallible());

    Label bail;
    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
    bailoutFrom(&bail, ins->snapshot());
  } else {
    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
  }
}
+
// Bitwise NOT of a 32-bit value: nor(x, 0) == ~x.
void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* input = ins->getOperand(0);
  const LDefinition* dest = ins->getDef(0);
  MOZ_ASSERT(!input->isConstant());

  masm.nor(ToRegister(dest), ToRegister(input), zero);
}

// Bitwise NOT of a 64-bit value, performed in place (input aliases output).
void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
  const LAllocation* input = ins->getOperand(0);
  MOZ_ASSERT(!input->isConstant());
  Register inputReg = ToRegister(input);
  MOZ_ASSERT(inputReg == ToRegister(ins->output()));
  masm.nor(inputReg, inputReg, zero);
}
+
// 32-bit bitwise or/xor/and. After a full-width register op, the result is
// re-canonicalized with `slliw dest, dest, 0`, which sign-extends the low
// 32 bits into the 64-bit register (RV64 word-op convention).
void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  // all of these bitops should be either imm32's, or integer registers.
  switch (ins->bitop()) {
    case JSOp::BitOr:
      if (rhs->isConstant()) {
        masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.or_(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
        masm.slliw(ToRegister(dest), ToRegister(dest), 0);
      }
      break;
    case JSOp::BitXor:
      if (rhs->isConstant()) {
        masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.ma_xor(ToRegister(dest), ToRegister(lhs),
                    Operand(ToRegister(rhs)));
        masm.slliw(ToRegister(dest), ToRegister(dest), 0);
      }
      break;
    case JSOp::BitAnd:
      if (rhs->isConstant()) {
        masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.and_(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
        masm.slliw(ToRegister(dest), ToRegister(dest), 0);
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
+
// 64-bit bitwise or/xor/and; lhs doubles as the output (asserted below),
// so the operation is performed in place.
void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  switch (lir->bitop()) {
    case JSOp::BitOr:
      if (IsConstant(rhs)) {
        masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOp::BitXor:
      if (IsConstant(rhs)) {
        masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOp::BitAnd:
      if (IsConstant(rhs)) {
        masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
+
// 32-bit shifts (<<, >>, >>>). Shift amounts are reduced mod 32 per JS
// semantics. A fallible unsigned right shift bails out when the result
// has the sign bit set (the value would not fit in an int32).
void CodeGenerator::visitShiftI(LShiftI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.slliw(dest, lhs, shift % 32);
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.sraiw(dest, lhs, shift % 32);
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.srliw(dest, lhs, shift % 32);
        } else {
          // x >>> 0 can overflow.
          if (ins->mir()->toUrsh()->fallible()) {
            bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
          }
          masm.move32(lhs, dest);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // The shift amounts should be AND'ed into the 0-31 range
    masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));

    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.sllw(dest, lhs, dest);
        break;
      case JSOp::Rsh:
        masm.sraw(dest, lhs, dest);
        break;
      case JSOp::Ursh:
        masm.srlw(dest, lhs, dest);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
+
// 64-bit shifts, performed in place on lhs (asserted to be the output).
// Constant shift amounts are reduced mod 64; a zero count is a no-op.
void CodeGenerator::visitShiftI64(LShiftI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
  LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (rhs->isConstant()) {
    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
    switch (lir->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.lshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.rshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
    return;
  }

  switch (lir->bitop()) {
    case JSOp::Lsh:
      masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOp::Rsh:
      masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOp::Ursh:
      masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    default:
      MOZ_CRASH("Unexpected shift op");
  }
}
+
// 64-bit rotate left/right, in place (input asserted equal to output).
// Constant counts are reduced mod 64; a zero count is a no-op.
void CodeGenerator::visitRotateI64(LRotateI64* lir) {
  MRotate* mir = lir->mir();
  LAllocation* count = lir->count();

  Register64 input = ToRegister64(lir->input());
  Register64 output = ToOutRegister64(lir);
  Register temp = ToTempRegisterOrInvalid(lir->temp());

  MOZ_ASSERT(input == output);

  if (count->isConstant()) {
    int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
    if (!c) {
      return;
    }
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(Imm32(c), input, output, temp);
    } else {
      masm.rotateRight64(Imm32(c), input, output, temp);
    }
  } else {
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(ToRegister(count), input, output, temp);
    } else {
      masm.rotateRight64(ToRegister(count), input, output, temp);
    }
  }
}
+
// Unsigned right shift whose result is kept as a double (x >>> y when the
// result may exceed INT32_MAX): shift into a temp, then convert the
// unsigned 32-bit value to double.
void CodeGenerator::visitUrshD(LUrshD* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp());

  const LAllocation* rhs = ins->rhs();
  FloatRegister out = ToFloatRegister(ins->output());

  if (rhs->isConstant()) {
    masm.srliw(temp, lhs, ToInt32(rhs) % 32);
  } else {
    masm.srlw(temp, lhs, ToRegister(rhs));
  }

  masm.convertUInt32ToDouble(temp, out);
}
+
// Count leading zero bits of a 32-bit value.
void CodeGenerator::visitClzI(LClzI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.Clz32(output, input);
}

// Count trailing zero bits of a 32-bit value.
void CodeGenerator::visitCtzI(LCtzI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.Ctz32(output, input);
}

// Population count (number of set bits) of a 32-bit value; needs a temp.
void CodeGenerator::visitPopcntI(LPopcntI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());
  Register tmp = ToRegister(ins->temp0());

  masm.Popcnt32(input, output, tmp);
}
+
// Population count of a 64-bit value.
// NOTE(review): scratchReg() is used where other visitors use .reg — on a
// 64-bit target these presumably name the same underlying register; confirm
// against Register64 in Register-riscv64.h.
void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
  Register64 input = ToRegister64(ins->getInt64Operand(0));
  Register64 output = ToOutRegister64(ins);
  Register tmp = ToRegister(ins->getTemp(0));

  masm.Popcnt64(input.scratchReg(), output.scratchReg(), tmp);
}
+
// Math.pow(x, 0.5) with the two IEEE corner cases JS requires:
// pow(-Infinity, 0.5) == Infinity, and pow(-0, 0.5) == 0. The general case
// is sqrt(x + 0.0), where adding 0.0 folds -0 into +0.
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());
  ScratchDoubleScope fpscratch(masm);

  Label done, skip;

  // Masm.pow(-Infinity, 0.5) == Infinity.
  masm.loadConstantDouble(NegativeInfinity<double>(), fpscratch);
  UseScratchRegisterScope temps(&masm);
  Register scratch = temps.Acquire();

  // scratch = 1 when input != -Infinity (or unordered); in that case skip
  // to the general path.
  masm.ma_compareF64(scratch, Assembler::DoubleNotEqualOrUnordered, input,
                     fpscratch);
  masm.ma_branch(&skip, Assembler::Equal, scratch, Operand(1));
  // masm.ma_bc_d(input, fpscratch, &skip, Assembler::DoubleNotEqualOrUnordered,
  // ShortJump);
  // input == -Infinity: negate the -Infinity in fpscratch to yield +Infinity.
  masm.fneg_d(output, fpscratch);
  masm.ma_branch(&done, ShortJump);

  masm.bind(&skip);
  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
  // Adding 0 converts any -0 to 0.
  masm.loadConstantDouble(0.0, fpscratch);
  masm.fadd_d(output, input, fpscratch);
  masm.fsqrt_d(output, output);

  masm.bind(&done);
}
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.fadd_d(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.fsub_d(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.fmul_d(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.fdiv_d(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.fadd_s(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.fsub_s(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.fmul_s(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.fdiv_s(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
// Truncate a double to int32 (JS ToInt32 semantics) via the shared helper.
void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                     ins->mir());
}

// Truncate a float32 to int32 (JS ToInt32 semantics) via the shared helper.
void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                      ins->mir());
}

// Wasm builtin-call variant of double-to-int32 truncation.
void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
                     ToRegister(lir->getDef(0)), lir->mir());
}

// Wasm builtin-call variant of float32-to-int32 truncation.
void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
                      ToRegister(lir->getDef(0)), lir->mir());
}
+
// Truncate a double/float32 to a (signed or unsigned) 32-bit integer for
// wasm. Out-of-range/NaN inputs branch to the out-of-line check, which
// traps or saturates, then rejoins.
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());

  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  if (mir->isUnsigned()) {
    if (fromType == MIRType::Double) {
      masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
                                      oolEntry);
    } else if (fromType == MIRType::Float32) {
      masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
                                       oolEntry);
    } else {
      MOZ_CRASH("unexpected type");
    }

    masm.bind(ool->rejoin());
    return;
  }

  if (fromType == MIRType::Double) {
    masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
                                   oolEntry);
  } else if (fromType == MIRType::Float32) {
    masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
                                    oolEntry);
  } else {
    MOZ_CRASH("unexpected type");
  }

  masm.bind(ool->rejoin());
}
+
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ masm.fsgnj_s(output, lhs, rhs);
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ masm.fsgnj_d(output, lhs, rhs);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
// Branch on the truthiness of a double: compare against 0.0 so that both
// zero and NaN take the false edge. When the false block is the fallthrough,
// only a single conditional branch is emitted.
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());
  ScratchDoubleScope fpscratch(masm);

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantDouble(0.0, fpscratch);
  // If 0, or NaN, the result is false.
  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(DoubleFloat, input, fpscratch, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(DoubleFloat, input, fpscratch, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}

// Branch on the truthiness of a float32; same scheme as the double case.
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());
  ScratchFloat32Scope fpscratch(masm);

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantFloat32(0.0f, fpscratch);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(SingleFloat, input, fpscratch, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(SingleFloat, input, fpscratch, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}
+
// Double comparison producing a 0/1 integer result in the output register.
void CodeGenerator::visitCompareD(LCompareD* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  Register dest = ToRegister(comp->output());

  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_compareF64(dest, cond, lhs, rhs);
}

// Float32 comparison producing a 0/1 integer result in the output register.
void CodeGenerator::visitCompareF(LCompareF* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  Register dest = ToRegister(comp->output());

  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_compareF32(dest, cond, lhs, rhs);
}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+  FloatRegister lhs = ToFloatRegister(comp->left());
+  FloatRegister rhs = ToFloatRegister(comp->right());
+
+  Assembler::DoubleCondition cond =
+      JSOpToDoubleCondition(comp->cmpMir()->jsop());
+  MBasicBlock* ifTrue = comp->ifTrue();
+  MBasicBlock* ifFalse = comp->ifFalse();
+
+  // Fall through to ifFalse when it is the next block; otherwise branch on
+  // the inverted condition and jump to ifTrue.
+  if (isNextBlock(ifFalse->lir())) {
+    branchToBlock(DoubleFloat, lhs, rhs, ifTrue, cond);
+  } else {
+    branchToBlock(DoubleFloat, lhs, rhs, ifFalse,
+                  Assembler::InvertCondition(cond));
+    jumpToBlock(ifTrue);
+  }
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+  FloatRegister lhs = ToFloatRegister(comp->left());
+  FloatRegister rhs = ToFloatRegister(comp->right());
+
+  Assembler::DoubleCondition cond =
+      JSOpToDoubleCondition(comp->cmpMir()->jsop());
+  MBasicBlock* ifTrue = comp->ifTrue();
+  MBasicBlock* ifFalse = comp->ifFalse();
+
+  // Float32 variant of visitCompareDAndBranch.
+  if (isNextBlock(ifFalse->lir())) {
+    branchToBlock(SingleFloat, lhs, rhs, ifTrue, cond);
+  } else {
+    branchToBlock(SingleFloat, lhs, rhs, ifFalse,
+                  Assembler::InvertCondition(cond));
+    jumpToBlock(ifTrue);
+  }
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
+  // Compute (left & right) into the scratch register, then branch on it.
+  ScratchRegisterScope scratch(masm);
+  if (lir->right()->isConstant()) {
+    masm.ma_and(scratch, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
+  } else {
+    masm.ma_and(scratch, ToRegister(lir->left()), ToRegister(lir->right()));
+  }
+  // NOTE(review): scratch is passed as both comparison operands; presumably
+  // emitBranch treats this as a zero/non-zero test of the and-result under
+  // lir->cond() -- confirm against emitBranch's semantics.
+  emitBranch(scratch, Register(scratch), lir->cond(), lir->ifTrue(),
+             lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+  // Unsigned 32-bit integer -> double conversion.
+  masm.convertUInt32ToDouble(ToRegister(lir->input()),
+                             ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+  // Unsigned 32-bit integer -> float32 conversion.
+  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+                              ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+  // Logical not: output = (input == 0).
+  masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+                ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNotD(LNotD* ins) {
+  // Since this operation is not, we want to set a bit if
+  // the double is falsey, which means 0.0, -0.0 or NaN.
+  FloatRegister in = ToFloatRegister(ins->input());
+  Register dest = ToRegister(ins->output());
+  ScratchDoubleScope fpscratch(masm);
+
+  // DoubleEqualOrUnordered against 0.0 covers 0.0, -0.0 (compares equal)
+  // and NaN (unordered) in a single test.
+  masm.loadConstantDouble(0.0, fpscratch);
+  masm.ma_compareF64(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+  // Since this operation is not, we want to set a bit if
+  // the float32 is falsey, which means 0.0, -0.0 or NaN.
+  FloatRegister in = ToFloatRegister(ins->input());
+  Register dest = ToRegister(ins->output());
+  ScratchFloat32Scope fpscratch(masm);
+
+  masm.loadConstantFloat32(0.0f, fpscratch);
+  masm.ma_compareF32(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+  // Emit a fence of the requested barrier type.
+  masm.memoryBarrier(ins->type());
+}
+
+// Wasm loads/stores share the platform-independent emitters.
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+  const MAsmJSLoadHeap* mir = ins->mir();
+  const LAllocation* ptr = ins->ptr();
+  const LDefinition* out = ins->output();
+  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+  // Decode the access type into a load width, signedness and int/float flag.
+  bool isSigned;
+  int size;
+  bool isFloat = false;
+  switch (mir->access().type()) {
+    case Scalar::Int8:
+      isSigned = true;
+      size = 8;
+      break;
+    case Scalar::Uint8:
+      isSigned = false;
+      size = 8;
+      break;
+    case Scalar::Int16:
+      isSigned = true;
+      size = 16;
+      break;
+    case Scalar::Uint16:
+      isSigned = false;
+      size = 16;
+      break;
+    case Scalar::Int32:
+      isSigned = true;
+      size = 32;
+      break;
+    case Scalar::Uint32:
+      isSigned = false;
+      size = 32;
+      break;
+    case Scalar::Float64:
+      isFloat = true;
+      size = 64;
+      break;
+    case Scalar::Float32:
+      isFloat = true;
+      size = 32;
+      break;
+    default:
+      MOZ_CRASH("unexpected array type");
+  }
+
+  // Constant pointer: the bounds check was statically eliminated, so the
+  // load can be issued unconditionally at HeapReg + ptrImm.
+  if (ptr->isConstant()) {
+    MOZ_ASSERT(!mir->needsBoundsCheck());
+    int32_t ptrImm = ptr->toConstant()->toInt32();
+    MOZ_ASSERT(ptrImm >= 0);
+    if (isFloat) {
+      if (size == 32) {
+        masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
+      } else {
+        masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
+      }
+    } else {
+      masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
+                   static_cast<LoadStoreSize>(size),
+                   isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  Register ptrReg = ToRegister(ptr);
+
+  // Dynamic pointer, no bounds check needed: load directly.
+  if (!mir->needsBoundsCheck()) {
+    if (isFloat) {
+      if (size == 32) {
+        masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+                         ToFloatRegister(out));
+      } else {
+        masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+                        ToFloatRegister(out));
+      }
+    } else {
+      masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+                   static_cast<LoadStoreSize>(size),
+                   isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  // Bounds-checked path: out-of-range asm.js loads do not trap; they
+  // produce NaN (float) or 0 (int) instead.
+  Label done, outOfRange;
+  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+                         ToRegister(boundsCheckLimit), &outOfRange);
+  // Offset is ok, let's load value.
+  if (isFloat) {
+    if (size == 32) {
+      masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+                       ToFloatRegister(out));
+    } else {
+      masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+                      ToFloatRegister(out));
+    }
+  } else {
+    masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+                 static_cast<LoadStoreSize>(size),
+                 isSigned ? SignExtend : ZeroExtend);
+  }
+  masm.ma_branch(&done, ShortJump);
+  masm.bind(&outOfRange);
+  // Offset is out of range. Load default values.
+  if (isFloat) {
+    if (size == 32) {
+      masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
+    } else {
+      masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
+    }
+  } else {
+    masm.move32(Imm32(0), ToRegister(out));
+  }
+  masm.bind(&done);
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+  const MAsmJSStoreHeap* mir = ins->mir();
+  const LAllocation* value = ins->value();
+  const LAllocation* ptr = ins->ptr();
+  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+  // Decode the access type into a store width, signedness and int/float flag.
+  bool isSigned;
+  int size;
+  bool isFloat = false;
+  switch (mir->access().type()) {
+    case Scalar::Int8:
+      isSigned = true;
+      size = 8;
+      break;
+    case Scalar::Uint8:
+      isSigned = false;
+      size = 8;
+      break;
+    case Scalar::Int16:
+      isSigned = true;
+      size = 16;
+      break;
+    case Scalar::Uint16:
+      isSigned = false;
+      size = 16;
+      break;
+    case Scalar::Int32:
+      isSigned = true;
+      size = 32;
+      break;
+    case Scalar::Uint32:
+      isSigned = false;
+      size = 32;
+      break;
+    case Scalar::Float64:
+      isFloat = true;
+      size = 64;
+      break;
+    case Scalar::Float32:
+      isFloat = true;
+      size = 32;
+      break;
+    default:
+      MOZ_CRASH("unexpected array type");
+  }
+
+  // Constant pointer: the bounds check was statically eliminated, so the
+  // store can be issued unconditionally at HeapReg + ptrImm.
+  if (ptr->isConstant()) {
+    MOZ_ASSERT(!mir->needsBoundsCheck());
+    int32_t ptrImm = ptr->toConstant()->toInt32();
+    MOZ_ASSERT(ptrImm >= 0);
+
+    if (isFloat) {
+      FloatRegister freg = ToFloatRegister(value);
+      Address addr(HeapReg, ptrImm);
+      if (size == 32) {
+        masm.storeFloat32(freg, addr);
+      } else {
+        masm.storeDouble(freg, addr);
+      }
+    } else {
+      masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
+                    static_cast<LoadStoreSize>(size),
+                    isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  Register ptrReg = ToRegister(ptr);
+
+  // Dynamic pointer, no bounds check needed: store directly.
+  if (!mir->needsBoundsCheck()) {
+    if (isFloat) {
+      FloatRegister freg = ToFloatRegister(value);
+      BaseIndex bi(HeapReg, ptrReg, TimesOne);
+      if (size == 32) {
+        masm.storeFloat32(freg, bi);
+      } else {
+        masm.storeDouble(freg, bi);
+      }
+    } else {
+      masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+                    static_cast<LoadStoreSize>(size),
+                    isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  // Bounds-checked path: out-of-range asm.js stores are silently ignored,
+  // so the out-of-range branch simply skips the store.
+  Label outOfRange;
+  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+                         ToRegister(boundsCheckLimit), &outOfRange);
+
+  // Offset is ok, let's store value.
+  if (isFloat) {
+    if (size == 32) {
+      masm.storeFloat32(ToFloatRegister(value),
+                        BaseIndex(HeapReg, ptrReg, TimesOne));
+    } else {
+      masm.storeDouble(ToFloatRegister(value),
+                       BaseIndex(HeapReg, ptrReg, TimesOne));
+    }
+  } else {
+    masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+                  static_cast<LoadStoreSize>(size),
+                  isSigned ? SignExtend : ZeroExtend);
+  }
+
+  masm.bind(&outOfRange);
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+    LWasmCompareExchangeHeap* ins) {
+  MWasmCompareExchangeHeap* mir = ins->mir();
+  Register ptrReg = ToRegister(ins->ptr());
+  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  Register oldval = ToRegister(ins->oldValue());
+  Register newval = ToRegister(ins->newValue());
+  // Sub-word accesses need extra temps for the value/offset/mask shuffle.
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
+                           offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+  MWasmAtomicExchangeHeap* mir = ins->mir();
+  Register ptrReg = ToRegister(ins->ptr());
+  Register value = ToRegister(ins->value());
+  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  // Atomically swap `value` with the heap cell; previous value -> output.
+  masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
+                          maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+  MOZ_ASSERT(ins->mir()->hasUses());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  MWasmAtomicBinopHeap* mir = ins->mir();
+  Register ptrReg = ToRegister(ins->ptr());
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+
+  // Read-modify-write whose fetched (old) value is used, hence FetchOp.
+  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
+                         ToRegister(ins->value()), srcAddr, valueTemp,
+                         offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+    LWasmAtomicBinopHeapForEffect* ins) {
+  MOZ_ASSERT(!ins->mir()->hasUses());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  MWasmAtomicBinopHeap* mir = ins->mir();
+  Register ptrReg = ToRegister(ins->ptr());
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+  // Effect-only variant: the old value is discarded, so no output register.
+  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
+                          ToRegister(ins->value()), srcAddr, valueTemp,
+                          offsetTemp, maskTemp);
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+  const MWasmStackArg* mir = ins->mir();
+  // Spill an outgoing wasm call argument to its stack slot, dispatching on
+  // whether the argument is a constant, a GPR, or an FP register.
+  if (ins->arg()->isConstant()) {
+    masm.storePtr(ImmWord(ToInt32(ins->arg())),
+                  Address(StackPointer, mir->spOffset()));
+  } else {
+    if (ins->arg()->isGeneralReg()) {
+      masm.storePtr(ToRegister(ins->arg()),
+                    Address(StackPointer, mir->spOffset()));
+    } else if (mir->input()->type() == MIRType::Double) {
+      masm.storeDouble(ToFloatRegister(ins->arg()),
+                       Address(StackPointer, mir->spOffset()));
+    } else {
+      masm.storeFloat32(ToFloatRegister(ins->arg()),
+                        Address(StackPointer, mir->spOffset()));
+    }
+  }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+  const MWasmStackArg* mir = ins->mir();
+  Address dst(StackPointer, mir->spOffset());
+  // 64-bit integer argument: store either the immediate or the register pair.
+  if (IsConstant(ins->arg())) {
+    masm.store64(Imm64(ToInt64(ins->arg())), dst);
+  } else {
+    masm.store64(ToRegister64(ins->arg()), dst);
+  }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+  MIRType mirType = ins->mir()->type();
+
+  Register cond = ToRegister(ins->condExpr());
+  const LAllocation* falseExpr = ins->falseExpr();
+
+  // Integer/reference select: the true operand is pre-placed in the output;
+  // overwrite it with the false operand only when cond is zero.
+  if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
+    Register out = ToRegister(ins->output());
+    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+               "true expr input is reused for output");
+    if (falseExpr->isRegister()) {
+      masm.moveIfZero(out, ToRegister(falseExpr), cond);
+    } else {
+      // Spilled false operand: conditionally load it from memory.
+      masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
+    }
+    return;
+  }
+
+  // Floating-point select, same reuse convention as above.
+  FloatRegister out = ToFloatRegister(ins->output());
+  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+             "true expr input is reused for output");
+
+  if (falseExpr->isFloatReg()) {
+    if (mirType == MIRType::Float32) {
+      masm.ma_fmovz(SingleFloat, out, ToFloatRegister(falseExpr), cond);
+    } else if (mirType == MIRType::Double) {
+      masm.ma_fmovz(DoubleFloat, out, ToFloatRegister(falseExpr), cond);
+    } else {
+      MOZ_CRASH("unhandled type in visitWasmSelect!");
+    }
+  } else {
+    // Spilled false operand: skip the load when cond is non-zero.
+    Label done;
+    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+    if (mirType == MIRType::Float32) {
+      masm.loadFloat32(ToAddress(falseExpr), out);
+    } else if (mirType == MIRType::Double) {
+      masm.loadDouble(ToAddress(falseExpr), out);
+    } else {
+      MOZ_CRASH("unhandled type in visitWasmSelect!");
+    }
+
+    masm.bind(&done);
+  }
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32, and the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+                    ins->compareType() == MCompare::Compare_UInt32;
+  bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+  MOZ_RELEASE_ASSERT(
+      cmpIs32bit && selIs32bit,
+      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+  Register trueExprAndDest = ToRegister(ins->output());
+  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+             "true expr input is reused for output");
+
+  // Invert the condition: the destination already holds the true value, so
+  // we move the false value in when the *inverted* comparison holds.
+  Assembler::Condition cond = Assembler::InvertCondition(
+      JSOpToCondition(ins->compareType(), ins->jsop()));
+  const LAllocation* rhs = ins->rightExpr();
+  const LAllocation* falseExpr = ins->ifFalseExpr();
+  Register lhs = ToRegister(ins->leftExpr());
+
+  masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+                   trueExprAndDest);
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+  MOZ_ASSERT(gen->compilingWasm());
+  MWasmReinterpret* ins = lir->mir();
+
+  MIRType to = ins->type();
+  mozilla::DebugOnly<MIRType> from = ins->input()->type();
+
+  // Bitwise reinterpretation between 32-bit int and float32 via the RISC-V
+  // fmv.x.w / fmv.w.x instructions (no value conversion).
+  switch (to) {
+    case MIRType::Int32:
+      MOZ_ASSERT(from == MIRType::Float32);
+      masm.fmv_x_w(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+      break;
+    case MIRType::Float32:
+      MOZ_ASSERT(from == MIRType::Int32);
+      masm.fmv_w_x(ToFloatRegister(lir->output()), ToRegister(lir->input()));
+      break;
+    case MIRType::Double:
+    case MIRType::Int64:
+      // 64-bit reinterprets are lowered to a different LIR node.
+      MOZ_CRASH("not handled by this LIR opcode");
+    default:
+      MOZ_CRASH("unexpected WasmReinterpret");
+  }
+}
+
+void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
+  Register lhs = ToRegister(ins->lhs());
+  Register rhs = ToRegister(ins->rhs());
+  Register output = ToRegister(ins->output());
+  Label done;
+
+  // Prevent divide by zero.
+  if (ins->canBeDivideByZero()) {
+    if (ins->mir()->isTruncated()) {
+      if (ins->trapOnError()) {
+        // Wasm semantics: division by zero traps.
+        Label nonZero;
+        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+        masm.bind(&nonZero);
+      } else {
+        // Infinity|0 == 0
+        Label notzero;
+        masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+        masm.move32(Imm32(0), output);
+        masm.ma_branch(&done, ShortJump);
+        masm.bind(&notzero);
+      }
+    } else {
+      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+    }
+  }
+
+  // Compute the unsigned remainder first; for a div it is only needed for
+  // the exactness bailout below and is then overwritten by the quotient.
+  masm.ma_modu32(output, lhs, rhs);
+
+  // If the remainder is > 0, bailout since this must be a double.
+  if (ins->mir()->isDiv()) {
+    if (!ins->mir()->toDiv()->canTruncateRemainder()) {
+      bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+    }
+    // Get quotient
+    masm.ma_divu32(output, lhs, rhs);
+  }
+
+  if (!ins->mir()->isTruncated()) {
+    // An unsigned result with the sign bit set does not fit in an int32;
+    // bail out so it can be represented as a double.
+    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+  }
+
+  masm.bind(&done);
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+  const MEffectiveAddress* mir = ins->mir();
+  Register base = ToRegister(ins->base());
+  Register index = ToRegister(ins->index());
+  Register output = ToRegister(ins->output());
+
+  // output = base + (index << scale) + displacement.
+  BaseIndex address(base, index, mir->scale(), mir->displacement());
+  masm.computeEffectiveAddress(address, output);
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+  Register input = ToRegister(ins->input());
+  Register output = ToRegister(ins->output());
+
+  // 32-bit negation: output = 0 - input.
+  masm.ma_sub32(output, zero, input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+  Register64 input = ToRegister64(ins->getInt64Operand(0));
+  MOZ_ASSERT(input == ToOutRegister64(ins));
+  masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  FloatRegister output = ToFloatRegister(ins->output());
+
+  // fneg.d flips the sign bit, which is correct for NaN and -0.0 too.
+  masm.fneg_d(output, input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  FloatRegister output = ToFloatRegister(ins->output());
+
+  masm.fneg_s(output, input);
+}
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+  MWasmAddOffset* mir = lir->mir();
+  Register base = ToRegister(lir->base());
+  Register out = ToRegister(lir->output());
+
+  // out = base + offset; a carry means the effective address overflowed
+  // 32 bits, which is an out-of-bounds wasm access.
+  Label ok;
+  masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
+                         &ok);
+  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+  masm.bind(&ok);
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+  MWasmAddOffset* mir = lir->mir();
+  Register64 base = ToRegister64(lir->base());
+  Register64 out = ToOutRegister64(lir);
+
+  // 64-bit variant: trap on pointer-width carry instead.
+  Label ok;
+  masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
+                          ImmWord(mir->offset()), &ok);
+  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+  masm.bind(&ok);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+    LAtomicTypedArrayElementBinop* lir) {
+  MOZ_ASSERT(!lir->mir()->isForEffect());
+
+  AnyRegister output = ToAnyRegister(lir->output());
+  Register elements = ToRegister(lir->elements());
+  Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+  Register value = ToRegister(lir->value());
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  // Constant vs. register index only changes the addressing mode; the
+  // fully-fenced fetch-op is identical in both arms.
+  if (lir->index()->isConstant()) {
+    Address mem = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+                         lir->mir()->operation(), value, mem, valueTemp,
+                         offsetTemp, maskTemp, outTemp, output);
+  } else {
+    BaseIndex mem(elements, ToRegister(lir->index()),
+                  ScaleFromScalarType(arrayType));
+    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+                         lir->mir()->operation(), value, mem, valueTemp,
+                         offsetTemp, maskTemp, outTemp, output);
+  }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+    LAtomicTypedArrayElementBinopForEffect* lir) {
+  MOZ_ASSERT(lir->mir()->isForEffect());
+
+  Register elements = ToRegister(lir->elements());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+  Register value = ToRegister(lir->value());
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  // Effect-only variant: the fetched value is discarded, so no output.
+  if (lir->index()->isConstant()) {
+    Address mem = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+                          lir->mir()->operation(), value, mem, valueTemp,
+                          offsetTemp, maskTemp);
+  } else {
+    BaseIndex mem(elements, ToRegister(lir->index()),
+                  ScaleFromScalarType(arrayType));
+    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+                          lir->mir()->operation(), value, mem, valueTemp,
+                          offsetTemp, maskTemp);
+  }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+    LCompareExchangeTypedArrayElement* lir) {
+  Register elements = ToRegister(lir->elements());
+  AnyRegister output = ToAnyRegister(lir->output());
+  Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+  Register oldval = ToRegister(lir->oldval());
+  Register newval = ToRegister(lir->newval());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  // Fully-fenced compare-exchange on a typed-array element; the two arms
+  // differ only in addressing mode (constant vs. register index).
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
+                           output);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
+                           output);
+  }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+    LAtomicExchangeTypedArrayElement* lir) {
+  Register elements = ToRegister(lir->elements());
+  AnyRegister output = ToAnyRegister(lir->output());
+  Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+  Register value = ToRegister(lir->value());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  // Fully-fenced swap; previous element value lands in `output`.
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+                          valueTemp, offsetTemp, maskTemp, outTemp, output);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+                          valueTemp, offsetTemp, maskTemp, outTemp, output);
+  }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+    LCompareExchangeTypedArrayElement64* lir) {
+  Register elements = ToRegister(lir->elements());
+  Register oldval = ToRegister(lir->oldval());
+  Register newval = ToRegister(lir->newval());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = ToRegister64(lir->temp2());
+  Register out = ToRegister(lir->output());
+  Register64 tempOut(out);
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  // Unbox the BigInt operands into raw 64-bit temps; the output register
+  // doubles as the temp holding the new value.
+  masm.loadBigInt64(oldval, temp1);
+  masm.loadBigInt64(newval, tempOut);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+                           temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+                           temp2);
+  }
+
+  // Re-box the 64-bit result (in temp2) as a BigInt in `out`.
+  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+    LAtomicExchangeTypedArrayElement64* lir) {
+  Register elements = ToRegister(lir->elements());
+  Register value = ToRegister(lir->value());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = Register64(ToRegister(lir->temp2()));
+  Register out = ToRegister(lir->output());
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  // Unbox the BigInt value, swap it into the element, then re-box the
+  // previous value (in temp2) as the BigInt result.
+  masm.loadBigInt64(value, temp1);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+  }
+
+  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+    LAtomicTypedArrayElementBinop64* lir) {
+  MOZ_ASSERT(lir->mir()->hasUses());
+
+  Register elements = ToRegister(lir->elements());
+  Register value = ToRegister(lir->value());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = ToRegister64(lir->temp2());
+  Register out = ToRegister(lir->output());
+  Register64 tempOut = Register64(out);
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+  AtomicOp atomicOp = lir->mir()->operation();
+
+  // 64-bit fetch-op on a BigInt64/BigUint64 element; old value ends up in
+  // temp2 and is re-boxed as the BigInt result.
+  masm.loadBigInt64(value, temp1);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                         tempOut, temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                         tempOut, temp2);
+  }
+
+  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+    LAtomicTypedArrayElementBinopForEffect64* lir) {
+  MOZ_ASSERT(!lir->mir()->hasUses());
+
+  Register elements = ToRegister(lir->elements());
+  Register value = ToRegister(lir->value());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = ToRegister64(lir->temp2());
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+  AtomicOp atomicOp = lir->mir()->operation();
+
+  // Effect-only 64-bit binop: unbox the BigInt operand and apply the op;
+  // the old value is discarded, so no result is boxed.
+  masm.loadBigInt64(value, temp1);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                          temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                          temp2);
+  }
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+  Register elements = ToRegister(lir->elements());
+  Register temp = ToRegister(lir->temp());
+  Register64 temp64 = ToRegister64(lir->temp64());
+  Register out = ToRegister(lir->output());
+  const MLoadUnboxedScalar* mir = lir->mir();
+
+  Scalar::Type storageType = mir->storageType();
+
+  // Seq-cst 64-bit load: plain load bracketed by load-barrier fences, then
+  // the raw value is boxed as a BigInt.
+  auto sync = Synchronization::Load();
+  masm.memoryBarrierBefore(sync);
+  if (lir->index()->isConstant()) {
+    Address source =
+        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+    masm.load64(source, temp64);
+  } else {
+    BaseIndex source(elements, ToRegister(lir->index()),
+                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
+    masm.load64(source, temp64);
+  }
+  masm.memoryBarrierAfter(sync);
+  emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+  Register elements = ToRegister(lir->elements());
+  Register value = ToRegister(lir->value());
+  Register64 temp1 = ToRegister64(lir->temp1());
+
+  Scalar::Type writeType = lir->mir()->writeType();
+
+  // Seq-cst 64-bit store: unbox the BigInt, then store it bracketed by
+  // store-barrier fences.
+  masm.loadBigInt64(value, temp1);
+  auto sync = Synchronization::Store();
+  masm.memoryBarrierBefore(sync);
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), writeType);
+    masm.store64(temp1, dest);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(writeType));
+    masm.store64(temp1, dest);
+  }
+  masm.memoryBarrierAfter(sync);
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
+  Register ptr = ToRegister(lir->ptr());
+  Register64 oldValue = ToRegister64(lir->oldValue());
+  Register64 newValue = ToRegister64(lir->newValue());
+  Register64 output = ToOutRegister64(lir);
+  uint32_t offset = lir->mir()->access().offset();
+
+  // 64-bit compare-exchange on wasm memory at HeapReg + ptr + offset.
+  BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+  masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
+                             output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
+  Register ptr = ToRegister(lir->ptr());
+  Register64 value = ToRegister64(lir->value());
+  Register64 output = ToOutRegister64(lir);
+  uint32_t offset = lir->mir()->access().offset();
+
+  // 64-bit atomic swap; the previous memory value goes to `output`.
+  BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
+  Register ptr = ToRegister(lir->ptr());
+  Register64 value = ToRegister64(lir->value());
+  Register64 output = ToOutRegister64(lir);
+  Register64 temp(ToRegister(lir->getTemp(0)));
+  uint32_t offset = lir->mir()->access().offset();
+
+  BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+  // 64-bit fetch-op; the fetched (old) value lands in `output`.
+  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
+                           addr, temp, output);
+}
+
+// Round-to-nearby-integer LIR nodes are not yet implemented on riscv64.
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
+
+// SIMD (wasm v128) is not supported on riscv64; the lowering should never
+// produce these LIR nodes, so each visitor crashes.
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+    LWasmBinarySimd128WithConstant* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+    LWasmVariableShiftSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+    LWasmConstantShiftSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+    LWasmSignReplicationSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+    LWasmReplaceInt64LaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+    LWasmReduceAndBranchSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+    LWasmReduceSimd128ToInt64* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
diff --git a/js/src/jit/riscv64/CodeGenerator-riscv64.h b/js/src/jit/riscv64/CodeGenerator-riscv64.h
new file mode 100644
index 0000000000..793c834085
--- /dev/null
+++ b/js/src/jit/riscv64/CodeGenerator-riscv64.h
@@ -0,0 +1,210 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_CodeGenerator_riscv64_h
+#define jit_riscv64_CodeGenerator_riscv64_h
+
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorRiscv64;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorRiscv64>;
+
+class CodeGeneratorRiscv64 : public CodeGeneratorShared {
+ friend class MoveResolverLA;
+
+ protected:
+ CodeGeneratorRiscv64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm);
+
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branch32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+    // TODO(riscv64): branchTestPtr is avoided here because it triggers '-Wundefined-inline'.
+ MOZ_ASSERT(c == Assembler::Zero || c == Assembler::NonZero ||
+ c == Assembler::Signed || c == Assembler::NotSigned);
+ Label bail;
+ if (lhs == rhs) {
+ masm.ma_b(lhs, rhs, &bail, c);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.and_(scratch, lhs, rhs);
+ masm.ma_b(scratch, scratch, &bail, c);
+ }
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ Label bail;
+ ScratchRegisterScope scratch(masm);
+ masm.ma_and(scratch, reg, Imm32(0xFF));
+ masm.ma_b(scratch, scratch, &bail, Assembler::Zero);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock* mir,
+ Assembler::Condition cond) {
+ masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond);
+ }
+ void branchToBlock(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir, Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+
+ // Generating no result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+ public:
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+
+ protected:
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+ masm.splitTag(value.valueReg(), scratch);
+ emitBranch(scratch, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+ masm.splitTag(value.valueReg(), scratch);
+ emitBranch(scratch, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch = temps.Acquire();
+ masm.splitTag(value.valueReg(), scratch);
+ emitBranch(scratch, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue, ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorRiscv64 CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorRiscv64> {
+ protected:
+ LSnapshot* snapshot_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot) : snapshot_(snapshot) {}
+
+ void accept(CodeGeneratorRiscv64* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_CodeGenerator_riscv64_h */
diff --git a/js/src/jit/riscv64/LIR-riscv64.h b/js/src/jit/riscv64/LIR-riscv64.h
new file mode 100644
index 0000000000..7143919608
--- /dev/null
+++ b/js/src/jit/riscv64/LIR-riscv64.h
@@ -0,0 +1,399 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_LIR_riscv64_h
+#define jit_riscv64_LIR_riscv64_h
+
+namespace js {
+namespace jit {
+
+class LUnbox : public LInstructionHelper<1, 1, 0> {
+ protected:
+ LUnbox(LNode::Opcode opcode, const LAllocation& input)
+ : LInstructionHelper(opcode) {
+ setOperand(0, input);
+ }
+
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnbox {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnbox(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ int32_t shift() const { return shift_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LModI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() { return getTemp(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp0,
+ const LDefinition& temp1, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// Takes a tableswitch with an integer to decide which case to jump to.
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+ // This is added to share the same CodeGenerator prefixes.
+ const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Takes a boxed value as input to a tableswitch to decide which case to jump to.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+class LMulI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(MulI);
+
+ LMulI() : LBinaryMath(classOpcode) {}
+
+ MMul* mir() { return mir_->toMul(); }
+};
+
+class LUDivOrMod : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ LUDivOrMod() : LBinaryMath(classOpcode) {}
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->trapOnError();
+ }
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES + INT64_PIECES,
+ 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
+ LWasmCompareExchangeI64(const LAllocation& ptr,
+ const LInt64Allocation& oldValue,
+ const LInt64Allocation& newValue)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, oldValue);
+ setInt64Operand(1 + INT64_PIECES, newValue);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation oldValue() { return getInt64Operand(1); }
+ const LInt64Allocation newValue() {
+ return getInt64Operand(1 + INT64_PIECES);
+ }
+ const MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicExchangeHeap* mir() const {
+ return mir_->toWasmAtomicExchangeHeap();
+ }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const MWasmAtomicBinopHeap* mir() const {
+ return mir_->toWasmAtomicBinopHeap();
+ }
+};
+
+class LDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_LIR_riscv64_h */
diff --git a/js/src/jit/riscv64/Lowering-riscv64.cpp b/js/src/jit/riscv64/Lowering-riscv64.cpp
new file mode 100644
index 0000000000..b32896694a
--- /dev/null
+++ b/js/src/jit/riscv64/Lowering-riscv64.cpp
@@ -0,0 +1,1087 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/Lowering-riscv64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LTableSwitch* LIRGeneratorRiscv64::newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorRiscv64::newLTableSwitchV(
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), temp(), tableswitch);
+}
+
+void LIRGeneratorRiscv64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+template <size_t Temps>
+void LIRGeneratorRiscv64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES,
+ "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES,
+ "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorRiscv64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorRiscv64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+// x = !y
+void LIRGeneratorRiscv64::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x + y
+void LIRGeneratorRiscv64::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void LIRGeneratorRiscv64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorRiscv64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorRiscv64::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ bool needsTemp = false;
+ bool cannotAliasRhs = false;
+ bool reuseInput = true;
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ (willHaveDifferentLIRNodes(lhs, rhs) || cannotAliasRhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+ if (reuseInput) {
+ defineInt64ReuseInput(ins, mir, 0);
+ } else {
+ defineInt64(ins, mir);
+ }
+}
+
+void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template <size_t Temps>
+void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorRiscv64::lowerForCompareI64AndBranch(
+ MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ LCompareI64AndBranch* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64OrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorRiscv64::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+LBoxAllocation LIRGeneratorRiscv64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+LAllocation LIRGeneratorRiscv64::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorRiscv64::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorRiscv64::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorRiscv64::tempByteOpRegister() { return temp(); }
+LDefinition LIRGeneratorRiscv64::tempToUnbox() { return temp(); }
+
+void LIRGeneratorRiscv64::lowerUntypedPhiInput(MPhi* phi,
+ uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorRiscv64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorRiscv64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+}
+
+void LIRGeneratorRiscv64::lowerNegI(MInstruction* ins, MDefinition* input) {
+ define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+void LIRGeneratorRiscv64::lowerNegI64(MInstruction* ins, MDefinition* input) {
+ defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+ ins, 0);
+}
+
+void LIRGeneratorRiscv64::lowerMulI(MMul* mul, MDefinition* lhs,
+ MDefinition* rhs) {
+ LMulI* lir = new (alloc()) LMulI;
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void LIRGeneratorRiscv64::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir =
+ new (alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
+ LModMaskI* lir = new (alloc())
+ LModMaskI(useRegister(mod->lhs()), temp(LDefinition::GENERAL),
+ temp(LDefinition::GENERAL), shift + 1);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+ }
+ LModI* lir =
+ new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerUDiv(MDiv* div) {
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+
+ define(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorRiscv64::lowerUMod(MMod* mod) {
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+
+ define(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorRiscv64::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new (alloc())
+ LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void LIRGeneratorRiscv64::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
+
+ auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntLsh(MBigIntLsh* ins) {
+ auto* lir = new (alloc()) LBigIntLsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerBigIntRsh(MBigIntRsh* ins) {
+ auto* lir = new (alloc()) LBigIntRsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
+}
+
+void LIRGeneratorRiscv64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
+}
+
+void LIRGeneratorRiscv64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerWasmSelectI(MWasmSelect* select) {
+ auto* lir = new (alloc())
+ LWasmSelect(useRegisterAtStart(select->trueExpr()),
+ useAny(select->falseExpr()), useRegister(select->condExpr()));
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+void LIRGeneratorRiscv64::lowerWasmSelectI64(MWasmSelect* select) {
+ auto* lir = new (alloc()) LWasmSelectI64(
+ useInt64RegisterAtStart(select->trueExpr()),
+ useInt64(select->falseExpr()), useRegister(select->condExpr()));
+ defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+// On riscv64 we specialize the only cases where compare is {U,}Int32 and
+// select is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useRegister(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+ return;
+ }
+
+ define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorRiscv64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorRiscv64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnbox* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new (alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ defineInt64(
+ new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LCompareExchangeTypedArrayElement* lir = new (alloc())
+ LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
+ outTemp, valueTemp, offsetTemp,
+ maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LDefinition temp2 = temp();
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new (alloc()) LAtomicExchangeTypedArrayElement(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp1, temp2);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (ins->isForEffect()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new (alloc()) LAtomicTypedArrayElementBinopForEffect(
+ elements, index, value, valueTemp, offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ LAtomicTypedArrayElementBinop* lir =
+ new (alloc()) LAtomicTypedArrayElementBinop(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+ // We have no memory-base value, meaning that HeapReg is to be used as the
+ // memory base. This follows from the definition of
+ // FunctionCompiler::maybeLoadMemoryBase() in WasmIonCompile.cpp.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ auto* lir =
+ new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+  // See comment in LIRGenerator::visitAsmJSLoadHeap just above.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
+ auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
+  // sign-extended on the riscv64 platform and we should explicitly promote it
+  // to 64-bit when using it as an index register in memory accesses.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ LAllocation ptr;
+ ptr = useRegisterAtStart(base);
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmLoadI64(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmLoad(ptr);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ MDefinition* value = ins->value();
+
+ if (ins->access().type() == Scalar::Int64) {
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ if (ins->type() == MIRType::Int32) {
+ define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegister(base), useInt64Register(ins->oldValue()),
+ useInt64Register(ins->newValue()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
+ useRegister(base), useRegister(ins->oldValue()),
+ useRegister(ins->newValue()), valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(base), useInt64Register(ins->value()));
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc())
+ LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()));
+ lir->setTemp(0, temp());
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (!ins->hasUses()) {
+ LWasmAtomicBinopHeapForEffect* lir = new (alloc())
+ LWasmAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()), valueTemp,
+ offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+ MOZ_CRASH("ternary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+ MOZ_CRASH("binary SIMD NYI");
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ return false;
+}
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // There are probably many cases we want to specialize here; none yet.
+ return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+ MOZ_CRASH("shift SIMD NYI");
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+ MOZ_CRASH("shuffle SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("replace-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+ MOZ_CRASH("scalar-to-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+ MOZ_CRASH("unary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+ MOZ_CRASH("reduce-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("load-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("store-lane SIMD NYI");
+}
diff --git a/js/src/jit/riscv64/Lowering-riscv64.h b/js/src/jit/riscv64/Lowering-riscv64.h
new file mode 100644
index 0000000000..03ccb3ac8f
--- /dev/null
+++ b/js/src/jit/riscv64/Lowering-riscv64.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_Lowering_riscv64_h
+#define jit_riscv64_Lowering_riscv64_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorRiscv64 : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorRiscv64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+
+ LDefinition tempByteOpRegister();
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
+ void defineInt64Phi(MPhi*, size_t);
+
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerDivI64(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerModI64(MMod* mod);
+ void lowerUDiv(MDiv* div);
+ void lowerUDivI64(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerUModI64(MMod* mod);
+ void lowerUrshD(MUrsh* mir);
+ void lowerPowOfTwoI(MPow* mir);
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+};
+
+typedef LIRGeneratorRiscv64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_Lowering_riscv64_h */
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h b/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h
new file mode 100644
index 0000000000..65f04a33e8
--- /dev/null
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64-inl.h
@@ -0,0 +1,2025 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_MacroAssembler_riscv64_inl_h
+#define jit_riscv64_MacroAssembler_riscv64_inl_h
+
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+
+namespace js {
+namespace jit {
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ ImmPtr rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs != scratch);
+ loadPtr(rhs, scratch);
+ cmpPtrSet(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(rhs != scratch2);
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs != scratch);
+ load32(rhs, scratch);
+ cmp32Set(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ MOZ_ASSERT(rhs != scratch2);
+ load32(lhs, scratch2);
+ cmp32Set(cond, Register(scratch2), rhs, dest);
+}
+
+//{{{ check_macroassembler_style
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ MacroAssemblerRiscv64::ma_liPatchable(dest, Imm32(0));
+ sub(dest, StackPointer, dest);
+ return offset;
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+}
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ and_(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+ }
+}
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_add32TestOverflow(dest, dest, src, overflow);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_add32TestCarry(cond, dest, dest, src, overflow);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_addPtrTestOverflow(dest, dest, src, label);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_addPtrTestCarry(cond, dest, dest, src, label);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mul32TestOverflow(dest, dest, src, overflow);
+}
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+// the type of 'T src' maybe a Register, maybe a Imm32,depends on who call it.
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_sub32TestOverflow(dest, dest, src, label);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ ma_sub32(dest, dest, src);
+ ma_b(dest, dest, label, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_subPtrTestOverflow(dest, dest, src, label);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ subPtr(src, dest);
+ ma_b(dest, dest, label, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& address,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ Register tag = extractTag(address, scratch2);
+ ma_b(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+// ---------------------------------------------------------------------------
+// Tag-test "set" helpers.
+//
+// Each testXSet extracts the JS::Value tag of |src| into a scratch register
+// and materializes the result of the tag comparison as 0/1 in |dest|.
+// Only Equal/NotEqual conditions are accepted.
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BIGINT), cond);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+                                    Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BOOLEAN), cond);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(src, scratch2);
+  // Number tags (double and int32) occupy the low end of the tag range, so
+  // "is a number" becomes an unsigned <= against the upper-inclusive bound.
+  ma_cmp_set(dest, tag, ImmTag(JS::detail::ValueUpperInclNumberTag),
+             cond == Equal ? BelowOrEqual : Above);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_STRING), cond);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_SYMBOL), cond);
+}
+
+// Materialize (lhs <cond> rhs) as 0/1 in |dest|.
+// Also see below for specializations of cmpPtrSet.
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+  ma_cmp_set(dest, lhs, rhs, cond);
+}
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+  ma_cmp_set(dest, lhs, rhs, cond);
+}
+// Branchless 32-bit absolute value:
+//   scratch = src >> 31 (arithmetic)  -> 0 for non-negative, -1 for negative
+//   dest = (src ^ scratch) - scratch  -> src, or the two's-complement negation
+// Note: like the hardware idiom, abs32(INT32_MIN) yields INT32_MIN.
+void MacroAssembler::abs32(Register src, Register dest) {
+  ScratchRegisterScope scratch(asMasm());
+  sraiw(scratch, src, 31);
+  xor_(dest, src, scratch);
+  subw(dest, dest, scratch);
+}
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+  fabs_s(dest, src);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+  fabs_d(dest, src);
+}
+// 32-bit / 64-bit / pointer-width addition. Register-register and
+// register-immediate forms delegate to ma_add32 / ma_add64; memory forms are
+// load-modify-store through a scratch register.
+void MacroAssembler::add32(Register src, Register dest) {
+  ma_add32(dest, dest, src);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+  ma_add32(dest, dest, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(dest, scratch2);
+  ma_add32(scratch2, scratch2, imm);
+  store32(scratch2, dest);
+}
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+  // On riscv64 a Register64 is a single GPR, so this is plain addPtr.
+  addPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(const Operand& src, Register64 dest) {
+  if (src.is_mem()) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    Register64 scratch64(scratch);
+
+    load64(src.toAddress(), scratch64);
+    add64(scratch64, dest);
+  } else {
+    add64(Register64(src.toReg()), dest);
+  }
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+  ma_add64(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  // The full 64-bit immediate must be materialized in a register first;
+  // dest must not alias the scratch used for it.
+  MOZ_ASSERT(dest.reg != scratch);
+  mov(ImmWord(imm.value), scratch);
+  add(dest.reg, dest.reg, scratch);
+}
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+  fadd_d(dest, dest, src);
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+  fadd_s(dest, dest, src);
+}
+void MacroAssembler::addPtr(Register src, Register dest) {
+  ma_add64(dest, dest, Operand(src));
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) {
+  ma_add64(dest, dest, imm);
+}
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  movePtr(imm, scratch);
+  addPtr(scratch, dest);
+}
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  loadPtr(dest, scratch);
+  addPtr(imm, scratch);
+  storePtr(scratch, dest);
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  loadPtr(src, scratch);
+  addPtr(scratch, dest);
+}
+// Bitwise AND, 32-bit / 64-bit / pointer-width. Memory forms go through a
+// scratch register; 64-bit immediates are materialized first.
+void MacroAssembler::and32(Register src, Register dest) {
+  ma_and(dest, dest, src);
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) {
+  ma_and(dest, dest, imm);
+}
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(dest, scratch2);
+  ma_and(scratch2, imm);
+  store32(scratch2, dest);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(src, scratch2);
+  ma_and(dest, dest, scratch2);
+}
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_li(scratch, ImmWord(imm.value));
+  ma_and(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+  ma_and(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::and64(const Operand& src, Register64 dest) {
+  if (src.is_mem()) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    Register64 scratch64(scratch);
+
+    load64(src.toAddress(), scratch64);
+    // NOTE(review): the sibling overloads use dest.reg directly; this one
+    // goes through scratchReg(), presumably the same underlying GPR on a
+    // 64-bit target — confirm and unify the style.
+    ma_and(dest.scratchReg(), scratch64.scratchReg());
+  } else {
+    ma_and(dest.scratchReg(), src.toReg());
+  }
+}
+
+void MacroAssembler::andPtr(Register src, Register dest) {
+  ma_and(dest, dest, src);
+}
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) {
+  ma_and(dest, dest, imm);
+}
+
+// Sub-word compare-and-branch. The loaded byte/halfword is widened so a
+// plain 32-bit compare gives the right answer: unsigned conditions
+// zero-extend both the memory operand and the immediate, signed conditions
+// sign-extend them.
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+                             Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load8ZeroExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(uint8_t(rhs.value)), label);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load8SignExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(int8_t(rhs.value)), label);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+                             Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  // scratch2 first holds the scaled base+index address, then is reused for
+  // the loaded byte.
+  computeScaledAddress(lhs, scratch2);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load8ZeroExtend(Address(scratch2, lhs.offset), scratch2);
+      branch32(cond, scratch2, rhs, label);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load8SignExtend(Address(scratch2, lhs.offset), scratch2);
+      branch32(cond, scratch2, rhs, label);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+                              Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load16ZeroExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(uint16_t(rhs.value)), label);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load16SignExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(int16_t(rhs.value)), label);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+// 32-bit compare-and-branch. Register/immediate forms delegate straight to
+// ma_b; every memory-operand form loads into a scratch register first.
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+                              L label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
+                              L label) {
+  ma_b(lhs, imm, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+                              Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(lhs, scratch2);
+  ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+                              Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(lhs, scratch2);
+  ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+                              Register rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(lhs, scratch2);
+  ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+                              Imm32 rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(lhs, scratch2);
+  ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+                              Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(lhs, scratch2);
+  ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr,
+                              Imm32 imm, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(addr, scratch2);
+  ma_b(scratch2, imm, label, cond);
+}
+// 64-bit compare-and-branch. On this 64-bit target a Register64 is one GPR,
+// so all forms reduce to branchPtr. The success/fail forms branch to
+// |success| when the condition holds and (optionally) to |fail| otherwise.
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+                              Label* success, Label* fail) {
+  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+                 cond == Assembler::LessThan ||
+                 cond == Assembler::LessThanOrEqual ||
+                 cond == Assembler::GreaterThan ||
+                 cond == Assembler::GreaterThanOrEqual ||
+                 cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+                 cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+             "other condition codes not supported");
+
+  branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+  if (fail) {
+    jump(fail);
+  }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+                              Label* success, Label* fail) {
+  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+                 cond == Assembler::LessThan ||
+                 cond == Assembler::LessThanOrEqual ||
+                 cond == Assembler::GreaterThan ||
+                 cond == Assembler::GreaterThanOrEqual ||
+                 cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+                 cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+             "other condition codes not supported");
+
+  branchPtr(cond, lhs.reg, rhs.reg, success);
+  if (fail) {
+    jump(fail);
+  }
+}
+
+// Memory forms only support (in)equality.
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+                              Label* label) {
+  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+             "other condition codes not supported");
+
+  branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+                              Register64 rhs, Label* label) {
+  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+             "other condition codes not supported");
+
+  branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+                              const Address& rhs, Register scratch,
+                              Label* label) {
+  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+             "other condition codes not supported");
+  // The caller-supplied scratch holds the loaded rhs; it must not alias
+  // either base register.
+  MOZ_ASSERT(lhs.base != scratch);
+  MOZ_ASSERT(rhs.base != scratch);
+
+  loadPtr(rhs, scratch);
+  branchPtr(cond, lhs, scratch, label);
+}
+
+// FP compare-and-branch: ma_compareF64/F32 materializes the condition as
+// 0/1 in a GPR, then we branch when it is 1 (condition true).
+void MacroAssembler::branchDouble(DoubleCondition cc, FloatRegister frs1,
+                                  FloatRegister frs2, Label* L) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_compareF64(scratch, cc, frs1, frs2);
+  ma_b(scratch, Imm32(1), L, Equal);
+}
+void MacroAssembler::branchFloat(DoubleCondition cc, FloatRegister frs1,
+                                 FloatRegister frs2, Label* L) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_compareF32(scratch, cc, frs1, frs2);
+  ma_b(scratch, Imm32(1), L, Equal);
+}
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+                                  Label* label) {
+  MOZ_ASSERT(cond == Assembler::Overflow);
+  ma_mulPtrTestOverflow(dest, dest, src, label);
+}
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+  MOZ_ASSERT(cond == Overflow);
+  neg32(reg);
+  // INT32_MIN is the only 32-bit value whose negation overflows (it negates
+  // to itself), so checking the result against INT32_MIN detects overflow.
+  branch32(Assembler::Equal, reg, Imm32(INT32_MIN), label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+                                      Register rhs, Label* label) {
+  // Private values carry no tag bits on this platform; a plain pointer
+  // compare suffices.
+  branchPtr(cond, lhs, rhs, label);
+}
+
+// Pointer-width compare-and-branch. Register/immediate forms delegate to
+// ma_b; memory forms load into a scratch register and recurse.
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+                               L label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+                               Label* label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+                               Label* label) {
+  // Fast path: comparing against nullptr with Zero/NonZero needs no
+  // immediate materialization — test the register against itself.
+  if (rhs.value == nullptr && (cond == Zero || cond == NonZero)) {
+    ma_b(lhs, lhs, label, cond);
+  } else {
+    ma_b(lhs, rhs, label, cond);
+  }
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+                               Label* label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+                               Label* label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+                               L label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+                               Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+                               Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+                               Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+                               Register rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+                               ImmWord rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+                               Register rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+                               Register rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+                               ImmWord rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchPtr(cond, scratch2, rhs, label);
+}
+
+// Branch on (lhs & rhs): loads the 32-bit operand, masks it, and branches
+// on the masked value (ma_b with both operands equal tests the value
+// itself for Zero/NonZero/Signed/NotSigned).
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+                                  Imm32 rhs, Label* label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(lhs, scratch2);
+  and32(rhs, scratch2);
+  ma_b(scratch2, scratch2, label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+                                  Label* label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(lhs, scratch2);
+  and32(rhs, scratch2);
+  ma_b(scratch2, scratch2, label, cond);
+}
+// branchTestBigInt: branch to |label| when the operand's tag is (not)
+// JSVAL_TAG_BIGINT. The Register form takes an already-extracted tag;
+// the memory/ValueOperand forms extract the tag into a scratch first.
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_BIGINT), label, cond);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  // Bug fix: the previous code did computeEffectiveAddress +
+  // splitTag(scratch2, scratch2), which split the tag out of the effective
+  // *address* without ever loading the Value it points at. Use
+  // extractTag(BaseIndex, Register) like every other BaseIndex overload in
+  // this file (branchTestBoolean, branchTestInt32, branchTestMagic, ...).
+  Register tag = extractTag(address, scratch2);
+  branchTestBigInt(cond, tag, label);
+}
+// Branch on a BigInt's truthiness: truthy iff its digit length is non-zero.
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+                                            Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  unboxBigInt(value, scratch2);
+  load32(Address(scratch2, BigInt::offsetOfDigitLength()), scratch2);
+  ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+// branchTestBoolean: branch when the operand's tag is (not) JSVAL_TAG_BOOLEAN.
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+                                       Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+                                       const ValueOperand& value,
+                                       Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestBoolean(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+                                       Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+                                       Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestBoolean(cond, tag, label);
+}
+// Branch on the unboxed boolean payload itself (NonZero = true).
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+                                             Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  unboxBoolean(value, scratch2);
+  ma_b(scratch2, scratch2, label, b ? NonZero : Zero);
+}
+// branchTestDouble: doubles occupy all tags <= JSVAL_TAG_MAX_DOUBLE, so
+// Equal maps to an unsigned BelowOrEqual against that bound.
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+  ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestDouble(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestDouble(cond, tag, label);
+}
+
+// A double is falsy when it compares equal to 0.0 or is NaN (unordered);
+// DoubleEqualOrUnordered covers both in one compare.
+void MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value,
+                                            Label* label) {
+  ScratchDoubleScope fpscratch(*this);
+  loadConstantDouble(0.0, fpscratch);
+  DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+  branchDouble(cond, value, fpscratch, label);
+}
+// branchTestGCThing: all three operand shapes share one implementation.
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+                                       Label* label) {
+  branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+                                       Label* label) {
+  branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+                                       const ValueOperand& address,
+                                       Label* label) {
+  branchTestGCThingImpl(cond, address, label);
+}
+// branchTestInt32: branch when the operand's tag is (not) JSVAL_TAG_INT32.
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+                                     Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+                                     Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestInt32(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+                                     Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+                                     Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestInt32(cond, tag, label);
+}
+// The int32 payload occupies the low 32 bits of the boxed value; an int32
+// is truthy iff that payload is non-zero.
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+                                           Label* label) {
+  ScratchRegisterScope scratch(*this);
+  ExtractBits(scratch, value.valueReg(), 0, 32);
+  ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+// branchTestMagic: branch when the operand's tag is (not) JSVAL_TAG_MAGIC.
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+                                     Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+                                     Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestMagic(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+                                     Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestMagic(cond, tag, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+                                     L label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+// Compare the full 64-bit boxed bits against a specific MagicValue(why),
+// not just the tag.
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+                                     JSWhyMagic why, Label* label) {
+  uint64_t magic = MagicValue(why).asRawBits();
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  loadPtr(valaddr, scratch);
+  ma_b(scratch, ImmWord(magic), label, cond);
+}
+// branchTestNull: branch when the operand's tag is (not) JSVAL_TAG_NULL.
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+                                    Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+                                    Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestNull(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+                                    Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+                                    Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestNull(cond, tag, label);
+}
+// branchTestNumber: number tags (double and int32) are the low end of the
+// tag range, so Equal maps to unsigned <= ValueUpperInclNumberTag.
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  Condition actual = cond == Equal ? BelowOrEqual : Above;
+  ma_b(tag, ImmTag(JS::detail::ValueUpperInclNumberTag), label, actual);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestNumber(cond, scratch2, label);
+}
+// branchTestObject: branch when the operand's tag is (not) JSVAL_TAG_OBJECT.
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestObject(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestObject(cond, tag, label);
+}
+// branchTestPrimitive: primitives are every tag strictly below
+// ValueUpperExclPrimitiveTag, so Equal maps to unsigned Below.
+void MacroAssembler::branchTestPrimitive(Condition cond,
+                                         const ValueOperand& value,
+                                         Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestPrimitive(cond, scratch2, label);
+}
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+                                         Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag), label,
+       (cond == Equal) ? Below : AboveOrEqual);
+}
+// Branch on (lhs & rhs) at pointer width. When lhs == rhs the AND is a
+// no-op, so we branch on the value directly without using a scratch.
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+                                   L label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  if (lhs == rhs) {
+    ma_b(lhs, rhs, label, cond);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_and(scratch, lhs, Operand(rhs));
+    ma_b(scratch, scratch, label, cond);
+  }
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+                                   Label* label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_and(scratch, lhs, rhs);
+  ma_b(scratch, scratch, label, cond);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+                                   Imm32 rhs, Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(lhs, scratch2);
+  branchTestPtr(cond, scratch2, rhs, label);
+}
+// branchTestString: branch when the operand's tag is (not) JSVAL_TAG_STRING.
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestString(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestString(cond, tag, label);
+}
+// A string is truthy iff its length is non-zero.
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+                                            Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  unboxString(value, scratch2);
+  load32(Address(scratch2, JSString::offsetOfLength()), scratch2);
+  ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+// branchTestSymbol: branch when the operand's tag is (not) JSVAL_TAG_SYMBOL.
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestSymbol(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+                                      Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestSymbol(cond, tag, label);
+}
+// branchTestUndefined: branch when the tag is (not) JSVAL_TAG_UNDEFINED.
+void MacroAssembler::branchTestUndefined(Condition cond,
+                                         const ValueOperand& value,
+                                         Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  splitTag(value, scratch2);
+  branchTestUndefined(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+                                         Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+                                         const BaseIndex& address,
+                                         Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register tag = extractTag(address, scratch2);
+  branchTestUndefined(cond, tag, label);
+}
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+                                         Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+// Compare the full 64-bit boxed bits of the Value at |lhs| against |rhs|.
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+                                     const ValueOperand& rhs, Label* label) {
+  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+  branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+// Load a code pointer from |addr| and jump to it.
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(addr, scratch2);
+  branch(scratch2);
+}
+// Truncation to int32: the Trunc_w_* helpers record success in |scratch|;
+// we branch to |fail| when it reports 0 (conversion invalid).
+// NOTE(review): the MaybeModUint32 variants are byte-identical to the
+// strict ToInt32 variants here, i.e. they never take the "mod 2^32"
+// shortcut other ports allow — presumably conservative; confirm intended.
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+                                                        Register dest,
+                                                        Label* fail) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Trunc_w_d(dest, src, scratch);
+  ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+                                                 Register dest, Label* fail) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Trunc_w_d(dest, src, scratch);
+  ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+                                                         Register dest,
+                                                         Label* fail) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Trunc_w_s(dest, src, scratch);
+  ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+                                                  Register dest, Label* fail) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Trunc_w_s(dest, src, scratch);
+  ma_b(scratch, Imm32(0), fail, Assembler::Equal);
+}
+
+// Swap the two low bytes of |src| and sign-extend the 16-bit result:
+//   scratch  = (src & 0x00FF) << 8   (low byte -> high position)
+//   src      = (src & 0xFF00) >> 8   (high byte -> low position)
+//   src     |= scratch
+// The final slliw/sraiw pair sign-extends from bit 15 and discards any
+// stale bits above the halfword.
+void MacroAssembler::byteSwap16SignExtend(Register src) {
+  JitSpew(JitSpew_Codegen, "[ %s\n", __FUNCTION__);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Register scratch2 = temps.Acquire();
+  andi(scratch, src, 0xFF);
+  slli(scratch, scratch, 8);
+  ma_li(scratch2, 0xFF00);
+  and_(src, src, scratch2);
+  srli(src, src, 8);
+  or_(src, src, scratch);
+  slliw(src, src, 16);
+  sraiw(src, src, 16);
+  JitSpew(JitSpew_Codegen, "]");
+}
+
+// Same byte swap as above, but the trailing slliw/srliw pair zero-extends
+// the 16-bit result instead of sign-extending it.
+void MacroAssembler::byteSwap16ZeroExtend(Register src) {
+  JitSpew(JitSpew_Codegen, "[ %s\n", __FUNCTION__);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Register scratch2 = temps.Acquire();
+  andi(scratch, src, 0xFF);
+  slli(scratch, scratch, 8);
+  ma_li(scratch2, 0xFF00);
+  and_(src, src, scratch2);
+  srli(src, src, 8);
+  or_(src, src, scratch);
+  slliw(src, src, 16);
+  srliw(src, src, 16);
+  JitSpew(JitSpew_Codegen, "]");
+}
+
+// Full 32-/64-bit byte reversal via the shared ByteSwap helper.
+void MacroAssembler::byteSwap32(Register src) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ByteSwap(src, src, 4, scratch);
+}
+void MacroAssembler::byteSwap64(Register64 src) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ByteSwap(src.reg, src.reg, 8, scratch);
+}
+void MacroAssembler::clampIntToUint8(Register reg) {
+ // If reg is < 0, then we want to clamp to 0.
+ Label skip, skip2;
+ slti(ScratchRegister, reg, 0);
+ ma_branch(&skip, NotEqual, ScratchRegister, Operand(1));
+ ma_li(reg, Imm32(0));
+ jump(&skip2);
+ bind(&skip);
+ // If reg is >= 255, then we want to clamp to 255.
+ ma_branch(&skip2, LessThanOrEqual, reg, Operand(255));
+ ma_li(reg, Imm32(255));
+ bind(&skip2);
+}
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ Clz32(dest, src);
+}
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ Clz64(dest, src.reg);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ Ctz64(dest, src.reg);
+}
+
+// Compare the 16-bit value at `lhs` against `rhs` and set `dest` to the 0/1
+// result. Unsigned conditions compare zero-extended values; signed
+// conditions compare sign-extended values, and the immediate is narrowed to
+// match.
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+                              Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load16ZeroExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(uint16_t(rhs.value)), cond);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load16SignExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(int16_t(rhs.value)), cond);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// If (lhs cond *rhs) holds, load *src into dest; otherwise leave dest
+// untouched. This overload fetches the rhs operand from memory first.
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 const Address& rhs, const Address& src,
+                                 Register dest) {
+  ScratchRegisterScope scratch(*this);
+  MOZ_ASSERT(lhs != scratch && dest != scratch);
+  load32(rhs, scratch);
+  cmp32Load32(cond, lhs, scratch, src, dest);
+}
+
+// Register-register form: branch around the load when the (inverted)
+// condition holds.
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+                                 const Address& src, Register dest) {
+  Label skip;
+  branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+  load32(src, dest);
+  bind(&skip);
+}
+
+// If (*lhs cond rhs) holds, load the pointer at *src into dest; otherwise
+// leave dest untouched.
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+                                  const Address& src, Register dest) {
+  Label skip;
+  branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+  loadPtr(src, dest);
+  bind(&skip);
+}
+
+// Conditional move: dest = src when (lhs cond rhs). Implemented branch-free
+// by materializing the condition as 0/1 and using moveIfNotZero.
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+                                 Register src, Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  cmp32Set(cond, lhs, rhs, scratch2);
+  moveIfNotZero(dest, src, scratch2);
+}
+
+// Same as above, with the rhs operand loaded from memory.
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+                                 const Address& rhs, Register src,
+                                 Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(lhs != scratch2 && src != scratch2 && dest != scratch2);
+  load32(rhs, scratch2);
+  cmp32Move32(cond, lhs, scratch2, src, dest);
+}
+// Conditional pointer move: dest = src when (lhs cond rhs).
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+                                  Register src, Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  cmp32Set(cond, lhs, rhs, scratch2);
+  moveIfNotZero(dest, src, scratch2);
+}
+// Set dest to the 0/1 result of comparing the 64-bit value at `lhs` with a
+// 64-bit immediate.
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+                              Register dest) {
+  ma_cmp_set(dest, lhs, ImmWord(uint64_t(rhs.value)), cond);
+}
+// 8-bit analogue of cmp16Set: zero-extend for unsigned conditions,
+// sign-extend for signed ones.
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+                             Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load8ZeroExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(uint8_t(rhs.value)), cond);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load8SignExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(int8_t(rhs.value)), cond);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+// Conditional pointer move on a full-width comparison.
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+                                   Register src, Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  cmpPtrSet(cond, lhs, rhs, scratch2);
+  moveIfNotZero(dest, src, scratch2);
+}
+
+// Memory-rhs overload: not yet implemented on this port.
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+                                   const Address& rhs, Register src,
+                                   Register dest) {
+  MOZ_CRASH("NYI");
+}
+// Count-trailing-zeroes (32-bit): not implemented on this port.
+void MacroAssembler::ctz32(Register, Register, bool) { MOZ_CRASH(); }
+// Subtract rhs from lhs in place, then branch on the decremented value
+// compared against zero.
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+                                  Label* label) {
+  subPtr(rhs, lhs);
+  branchPtr(cond, lhs, Imm32(0), label);
+}
+// dest = dest / src (single precision).
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+  fdiv_s(dest, dest, src);
+}
+
+// dest = dest / src (double precision).
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+  fdiv_d(dest, dest, src);
+}
+// Unbox a pointer-typed Value into `dest`, branching to `fail` if the value
+// does not carry the expected tag.
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+                                      JSValueType type, Label* fail) {
+  MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+             type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+  // dest := src XOR mask
+  // scratch := dest >> JSVAL_TAG_SHIFT
+  // fail if scratch != 0
+  //
+  // Note: src and dest can be the same register
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  MOZ_ASSERT(src.valueReg() != scratch);
+  mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+  xor_(dest, src.valueReg(), scratch);
+  srli(scratch, dest, JSVAL_TAG_SHIFT);
+  ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+}
+
+// Memory overloads: load the Value into `dest` (used as a temporary) and
+// delegate to the register form above.
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+                                      JSValueType type, Label* fail) {
+  loadValue(src, ValueOperand(dest));
+  fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+                                      JSValueType type, Label* fail) {
+  loadValue(src, ValueOperand(dest));
+  fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+// The "flexible" shift helpers exist for platforms that constrain the shift-
+// count register; riscv64 shifts take any register, so they are plain shifts.
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+  lshift32(src, dest);
+}
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+  rshift32Arithmetic(src, dest);
+}
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+  rshift32(src, dest);
+}
+// Non-atomic 64-bit increment of the memory word at an absolute address.
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Register scratch2 = temps.Acquire();
+  ma_li(scratch, ImmWord(uintptr_t(dest.addr)));
+  ld(scratch2, scratch, 0);
+  addi(scratch2, scratch2, 1);
+  sd(scratch2, scratch, 0);
+}
+
+// On riscv64 a 32-bit load sign-extends into the full register, so a plain
+// load32 already produces a sign-extended pointer-width value.
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+  load32(src, dest);
+}
+// The ABI return address lives in ra.
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(ra, dest); }
+
+// 32-bit shifts; the W-form instructions use only the low 5 bits of the
+// register shift amount and sign-extend their 32-bit result.
+void MacroAssembler::lshift32(Register src, Register dest) {
+  sllw(dest, dest, src);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+  slliw(dest, dest, imm.value % 32);
+}
+// 64-bit shifts.
+void MacroAssembler::lshift64(Register shift, Register64 dest) {
+  sll(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  slli(dest.reg, dest.reg, imm.value);
+}
+void MacroAssembler::lshiftPtr(Register shift, Register dest) {
+  sll(dest, dest, shift);
+}
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  slli(dest, dest, imm.value);
+}
+// Min/max: delegate to the FloatMin/Max helpers. The `handleNaN` flag is
+// ignored here; the helpers define the NaN behavior themselves.
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+                               bool handleNaN) {
+  Float64Max(srcDest, srcDest, other);
+}
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+                                bool handleNaN) {
+  Float32Max(srcDest, srcDest, other);
+}
+// Any non-empty barrier request emits a full fence (sync); the specific
+// barrier bits are not differentiated.
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+  if (barrier) {
+    sync();
+  }
+}
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+                               bool handleNaN) {
+  Float64Min(srcDest, srcDest, other);
+}
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+                                bool handleNaN) {
+  Float32Min(srcDest, srcDest, other);
+}
+// Sign-extend the low 16 bits via a shift-left/arithmetic-shift-right pair.
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+  slli(dest, src, xlen - 16);
+  srai(dest, dest, xlen - 16);
+}
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+  move32To64SignExtend(src, dest);
+  move16SignExtend(dest.reg, dest.reg);
+}
+// slliw with a shift of 0 writes the sign-extension of the low 32 bits
+// (the RV64 sext.w idiom).
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+  slliw(dest, src, 0);
+}
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+  slliw(dest.reg, src, 0);
+}
+// Zero-extend the low 32 bits via a shift-left/logical-shift-right pair.
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+  slli(dest.reg, src, 32);
+  srli(dest.reg, dest.reg, 32);
+}
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+  slli(dest, src, 32);
+  srli(dest, dest, 32);
+}
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+  movePtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+  movePtr(ImmWord(imm.value), dest.reg);
+}
+
+// Truncate to 32 bits, keeping the result canonically sign-extended
+// (slliw by 0 == sext.w).
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+  slliw(dest, src.reg, 0);
+}
+
+// Sign-extend the low 8 bits.
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+  slli(dest, src, xlen - 8);
+  srai(dest, dest, xlen - 8);
+}
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+  move32To64SignExtend(src, dest);
+  move8SignExtend(dest.reg, dest.reg);
+}
+// Bit-pattern moves between FP and integer registers (fmv.*).
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+  fmv_x_d(dest.reg, src);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+  fmv_d_x(dest, src.reg);
+}
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+  fmv_x_w(dest, src);
+}
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+  fmv_w_x(dest, src);
+}
+// srcDest = srcDest * rhs (32-bit, result sign-extended by mulw).
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+  mulw(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+  ScratchRegisterScope scratch(asMasm());
+  move32(imm, scratch);
+  mul32(scratch, srcDest);
+}
+
+// dest = high 32 bits of (src * uint32(imm)), computed with a full 64-bit
+// multiply followed by a 32-bit shift.
+// NOTE(review): this is only correct when the upper 32 bits of `src` are
+// zero; a canonically sign-extended negative int32 in `src` would corrupt
+// the product. Confirm that callers pass a zero-extended value. There is
+// also no assert that src != scratch, unlike mul64 below -- verify.
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+  ScratchRegisterScope scratch(asMasm());
+  ma_li(scratch, uint32_t(imm.value));
+  mul(dest, src, scratch);
+  srli(dest, dest, 32);
+}
+
+// dest = dest * imm (64-bit, low half of the product).
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+  ScratchRegisterScope scratch(asMasm());
+  MOZ_ASSERT(dest.reg != scratch);
+  mov(ImmWord(imm.value), scratch);
+  mul(dest.reg, dest.reg, scratch);
+}
+
+// The `temp` parameter exists for 32-bit ports; it must be invalid here.
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+                           const Register temp) {
+  MOZ_ASSERT(temp == Register::Invalid());
+  mul64(imm, dest);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+                           const Register temp) {
+  MOZ_ASSERT(temp == Register::Invalid());
+  mul(dest.reg, dest.reg, src.reg);
+}
+
+// Operand overload: dispatch on memory vs. register source.
+void MacroAssembler::mul64(const Operand& src, const Register64& dest,
+                           const Register temp) {
+  if (src.is_mem()) {
+    ScratchRegisterScope scratch(asMasm());
+    Register64 scratch64(scratch);
+
+    load64(src.toAddress(), scratch64);
+    mul64(scratch64, dest, temp);
+  } else {
+    mul64(Register64(src.toReg()), dest, temp);
+  }
+}
+// dest = src * 3, via two adds (src + src + src).
+void MacroAssembler::mulBy3(Register src, Register dest) {
+  ScratchRegisterScope scratch(asMasm());
+  MOZ_ASSERT(src != scratch);
+  add(scratch, src, src);
+  add(dest, scratch, src);
+}
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+  fmul_d(dest, dest, src);
+}
+// dest = dest * (*imm): load the double pointed to by imm, then multiply.
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+                                  FloatRegister dest) {
+  ScratchRegisterScope scratch(asMasm());
+  ScratchDoubleScope fpscratch(asMasm());
+  movePtr(imm, scratch);
+  loadDouble(Address(scratch, 0), fpscratch);
+  mulDouble(fpscratch, dest);
+}
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+  fmul_s(dest, dest, src);
+}
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+  mul(srcDest, srcDest, rhs);
+}
+
+// Floating-point negation (flips the sign bit).
+void MacroAssembler::negateDouble(FloatRegister reg) { fneg_d(reg, reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { fneg_s(reg, reg); }
+
+// Integer negation: reg = 0 - reg.
+void MacroAssembler::neg64(Register64 reg) { sub(reg.reg, zero, reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) { sub(reg, zero, reg); }
+
+void MacroAssembler::neg32(Register reg) { subw(reg, zero, reg); }
+// Bitwise NOT: nor with zero inverts every bit.
+void MacroAssembler::not32(Register reg) { nor(reg, reg, zero); }
+
+void MacroAssembler::notPtr(Register reg) { nor(reg, reg, zero); }
+
+// Bitwise OR family: dest |= src, with read-modify-write for the memory
+// destination overload.
+void MacroAssembler::or32(Register src, Register dest) {
+  ma_or(dest, dest, src);
+}
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(dest, scratch2);
+  ma_or(scratch2, imm);
+  store32(scratch2, dest);
+}
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+  ma_or(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::or64(const Operand& src, Register64 dest) {
+  if (src.is_mem()) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    Register64 scratch64(scratch);
+
+    load64(src.toAddress(), scratch64);
+    or64(scratch64, dest);
+  } else {
+    or64(Register64(src.toReg()), dest);
+  }
+}
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_li(scratch, ImmWord(imm.value));
+  ma_or(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) {
+  ma_or(dest, dest, src);
+}
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+// Patching stack-pointer subtraction: not implemented on this port.
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset, Imm32) { MOZ_CRASH(); }
+// Population count; `tmp` is a caller-provided temporary for the helper.
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+  Popcnt32(output, input, tmp);
+}
+void MacroAssembler::popcnt64(Register64 input, Register64 output,
+                              Register tmp) {
+  Popcnt64(output.reg, input.reg, tmp);
+}
+// 32-bit division: srcDest = srcDest / rhs, signed or unsigned.
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+                                bool isUnsigned) {
+  if (isUnsigned) {
+    ma_divu32(srcDest, srcDest, rhs);
+  } else {
+    ma_div32(srcDest, srcDest, rhs);
+  }
+}
+
+// 32-bit remainder: srcDest = srcDest % rhs, signed or unsigned.
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+                                 bool isUnsigned) {
+  if (isUnsigned) {
+    ma_modu32(srcDest, srcDest, rhs);
+  } else {
+    ma_mod32(srcDest, srcDest, rhs);
+  }
+}
+// Left rotate by n is implemented as a right rotate by (64 - n).
+// NOTE(review): when count.value % 64 == 0 this requests a right-rotate of
+// 64 -- presumably Dror reduces the amount mod 64; confirm against the
+// helper's implementation. Same concern for the 32-bit rotateLeft below.
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+                                  Register temp) {
+  Dror(dest.reg, src.reg, Operand(64 - (count.value % 64)));
+}
+// Register-count form: scratch = 64 - (count mod 64), then right-rotate.
+// `temp` is unused on this port.
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+                                  Register64 dest, Register temp) {
+  ScratchRegisterScope scratch(asMasm());
+  ma_mod32(scratch, count, Operand(64));
+  negw(scratch, scratch);
+  addi(scratch, scratch, 64);
+  Dror(dest.reg, src.reg, Operand(scratch));
+}
+
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+  JitSpew(JitSpew_Codegen, "[ rotateLeft\n");
+  Ror(dest, input, Operand(32 - (count.value % 32)));
+  JitSpew(JitSpew_Codegen, "]\n");
+}
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+  JitSpew(JitSpew_Codegen, "[ rotateLeft\n");
+  ScratchRegisterScope scratch(asMasm());
+  ma_mod32(scratch, count, Operand(32));
+  negw(scratch, scratch);
+  addi(scratch, scratch, 32);
+  Ror(dest, input, Operand(scratch));
+  JitSpew(JitSpew_Codegen, "]\n");
+}
+// Right rotates map directly onto the Dror/Ror helpers.
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+                                   Register64 dest, Register temp) {
+  Dror(dest.reg, src.reg, Operand(count));
+}
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+                                   Register temp) {
+  Dror(dest.reg, src.reg, Operand(count.value));
+}
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+  Ror(dest, input, Operand(count.value));
+}
+void MacroAssembler::rotateRight(Register count, Register input,
+                                 Register dest) {
+  Ror(dest, input, Operand(count));
+}
+// 32-bit right shifts (W-form instructions sign-extend their result and use
+// only the low 5 bits of a register shift amount).
+void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
+  sraw(dest, dest, src);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+  sraiw(dest, dest, imm.value % 32);
+}
+void MacroAssembler::rshift32(Register src, Register dest) {
+  srlw(dest, dest, src);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+  srliw(dest, dest, imm.value % 32);
+}
+
+// 64-bit right shifts; immediate forms assert the amount is in [0, 64).
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  srai(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest) {
+  sra(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 dest) {
+  srl(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  srli(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  srai(dest, dest, imm.value);
+}
+void MacroAssembler::rshiftPtr(Register shift, Register dest) {
+  srl(dest, dest, shift);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  srli(dest, dest, imm.value);
+}
+// Spectre bounds checks: this port does not support index masking (the
+// release asserts enforce that the option is off), so these are plain
+// unsigned bounds-check branches. `maybeScratch` is unused.
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+                                          Register maybeScratch,
+                                          Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+                                          Register maybeScratch,
+                                          Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+                                           Register maybeScratch,
+                                           Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+                                           const Address& length,
+                                           Register maybeScratch,
+                                           Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+// Spectre mitigation primitives that are not implemented on this port.
+void MacroAssembler::spectreMovePtr(Condition, Register, Register) {
+  MOZ_CRASH("spectreMovePtr");
+}
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+                                         Register dest) {
+  MOZ_CRASH("spectreZeroRegister");
+}
+// Square roots.
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+  fsqrt_d(dest, src);
+}
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+  fsqrt_s(dest, src);
+}
+// Stores that do not canonicalize NaN payloads first.
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+                                                 const Address& addr) {
+  ma_fst_s(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
+                                                 const BaseIndex& addr) {
+  ma_fst_s(src, addr);
+}
+
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+                                                const Address& addr) {
+  ma_fst_d(src, addr);
+}
+void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
+                                                const BaseIndex& addr) {
+  ma_fst_d(src, addr);
+}
+// 32-bit subtraction: dest -= src.
+void MacroAssembler::sub32(Register src, Register dest) {
+  subw(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+  ma_sub32(dest, dest, imm);
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  load32(src, scratch);
+  subw(dest, dest, scratch);
+}
+
+// 64-bit subtraction: dest -= src.
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+  sub(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::sub64(const Operand& src, Register64 dest) {
+  if (src.is_mem()) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    Register64 scratch64(scratch);
+
+    load64(src.toAddress(), scratch64);
+    sub64(scratch64, dest);
+  } else {
+    sub64(Register64(src.toReg()), dest);
+  }
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ fsub_d(dest, dest, src);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ fsub_s(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(dest, scratch);
+ subPtr(src, scratch);
+ storePtr(scratch, dest);
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(addr, scratch);
+ subPtr(scratch, dest);
+}
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ma_sub64(dest, dest, imm);
+}
+void MacroAssembler::subPtr(Register src, Register dest) {
+ sub(dest, dest, src);
+}
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreStringMitigations);
+ Label skip;
+ branchTest32(Assembler::InvertCondition(cond), addr, mask, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+void MacroAssembler::test32MovePtr(Condition, const Address&, Imm32, Register,
+ Register) {
+ MOZ_CRASH();
+}
+void MacroAssembler::xor32(Register src, Register dest) {
+ ma_xor(dest, dest, src);
+}
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(dest, scratch2);
+ xor32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+ load32(src, scratch2);
+ xor32(scratch2, dest);
+}
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ ma_xor(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::xor64(const Operand& src, Register64 dest) {
+ if (src.is_mem()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ xor64(scratch64, dest);
+ } else {
+ xor64(Register64(src.toReg()), dest);
+ }
+}
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, ImmWord(imm.value));
+ ma_xor(dest.reg, dest.reg, scratch);
+}
+void MacroAssembler::xorPtr(Register src, Register dest) {
+ ma_xor(dest, dest, src);
+}
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+//}}} check_macroassembler_style
+
+void MacroAssemblerRiscv64Compat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), addr);
+}
+
+void MacroAssemblerRiscv64Compat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ jr(ra, 0);
+}
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_MacroAssembler_riscv64_inl_h */
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64.cpp b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
new file mode 100644
index 0000000000..c7879fd5cc
--- /dev/null
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
@@ -0,0 +1,6515 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+
+#include "jsmath.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "jit/riscv64/SharedICRegisters-riscv64.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+// Downcast helpers: MacroAssemblerRiscv64 is only ever embedded in a
+// MacroAssembler, so the static_cast is safe.
+MacroAssembler& MacroAssemblerRiscv64::asMasm() {
+  return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerRiscv64::asMasm() const {
+  return *static_cast<const MacroAssembler*>(this);
+}
+
+// rd = (rj c imm) as 0/1 for a 64-bit immediate. Values that fit in the
+// non-negative int32 range reuse the Imm32 path; anything else is
+// materialized in a scratch register first.
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, ImmWord imm,
+                                       Condition c) {
+  if (imm.value <= INT32_MAX) {
+    ma_cmp_set(rd, rj, Imm32(uint32_t(imm.value)), c);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_li(scratch, imm);
+    ma_cmp_set(rd, rj, scratch, c);
+  }
+}
+
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, ImmPtr imm,
+                                       Condition c) {
+  ma_cmp_set(rd, rj, ImmWord(uintptr_t(imm.value)), c);
+}
+
+// Compare a 32-bit value loaded from memory against an immediate.
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Address address, Imm32 imm,
+                                       Condition c) {
+  // TODO(loong64): 32-bit ma_cmp_set?
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  ma_load(scratch2, address, SizeWord);
+  ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+// Compare a 64-bit value loaded from memory against an immediate.
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Address address,
+                                       ImmWord imm, Condition c) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  ma_load(scratch2, address, SizeDouble);
+  ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+// rd = (rj c imm) as 0/1. Comparisons against zero get dedicated single-
+// instruction (or two-instruction) sequences; other immediates go through
+// Equal/NotEqual via XOR, or the generic ma_cmp below.
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, Imm32 imm,
+                                       Condition c) {
+  if (imm.value == 0) {
+    switch (c) {
+      case Equal:
+      case BelowOrEqual:
+        // rj == 0 (unsigned <= 0 is the same test): rj < 1.
+        ma_sltu(rd, rj, Operand(1));
+        break;
+      case NotEqual:
+      case Above:
+        // rj != 0 (unsigned > 0 is the same test): 0 < rj.
+        sltu(rd, zero, rj);
+        break;
+      case AboveOrEqual:
+      case Below:
+        // Unsigned >= 0 is always true; unsigned < 0 is always false.
+        ori(rd, zero, c == AboveOrEqual ? 1 : 0);
+        break;
+      case GreaterThan:
+      case LessThanOrEqual:
+        // 0 < rj, inverted for the <= case.
+        slt(rd, zero, rj);
+        if (c == LessThanOrEqual) {
+          xori(rd, rd, 1);
+        }
+        break;
+      case LessThan:
+      case GreaterThanOrEqual:
+        // rj < 0, inverted for the >= case.
+        slt(rd, rj, zero);
+        if (c == GreaterThanOrEqual) {
+          xori(rd, rd, 1);
+        }
+        break;
+      case Zero:
+        ma_sltu(rd, rj, Operand(1));
+        break;
+      case NonZero:
+        sltu(rd, zero, rj);
+        break;
+      case Signed:
+        slt(rd, rj, zero);
+        break;
+      case NotSigned:
+        slt(rd, rj, zero);
+        xori(rd, rd, 1);
+        break;
+      default:
+        MOZ_CRASH("Invalid condition.");
+    }
+    return;
+  }
+
+  switch (c) {
+    case Equal:
+    case NotEqual:
+      // rj ^ imm is zero exactly when rj == imm.
+      ma_xor(rd, rj, imm);
+      if (c == Equal) {
+        ma_sltu(rd, rd, Operand(1));
+      } else {
+        sltu(rd, zero, rd);
+      }
+      break;
+    case Zero:
+    case NonZero:
+    case Signed:
+    case NotSigned:
+      MOZ_CRASH("Invalid condition.");
+    default:
+      // ma_cmp leaves 0/1 in rd and reports whether `c` holds when rd != 0
+      // (NotEqual) or when rd == 0 (Equal); the latter needs inverting.
+      Condition cond = ma_cmp(rd, rj, imm, c);
+      MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+      if (cond == Equal) xori(rd, rd, 1);
+  }
+}
+
+// Materialize the comparison (lhs c rhs) into `dest` using a single
+// slt/sltu, and return the condition (Equal or NotEqual against zero) the
+// caller should apply to `dest` to realize `c`.
+Assembler::Condition MacroAssemblerRiscv64::ma_cmp(Register dest, Register lhs,
+                                                   Register rhs, Condition c) {
+  switch (c) {
+    case Above:
+      // bgtu s,t,label =>
+      //   sltu at,t,s
+      //   bne at,$zero,offs
+      sltu(dest, rhs, lhs);
+      return NotEqual;
+    case AboveOrEqual:
+      // bgeu s,t,label =>
+      //   sltu at,s,t
+      //   beq at,$zero,offs
+      sltu(dest, lhs, rhs);
+      return Equal;
+    case Below:
+      // bltu s,t,label =>
+      //   sltu at,s,t
+      //   bne at,$zero,offs
+      sltu(dest, lhs, rhs);
+      return NotEqual;
+    case BelowOrEqual:
+      // bleu s,t,label =>
+      //   sltu at,t,s
+      //   beq at,$zero,offs
+      sltu(dest, rhs, lhs);
+      return Equal;
+    case GreaterThan:
+      // bgt s,t,label =>
+      //   slt at,t,s
+      //   bne at,$zero,offs
+      slt(dest, rhs, lhs);
+      return NotEqual;
+    case GreaterThanOrEqual:
+      // bge s,t,label =>
+      //   slt at,s,t
+      //   beq at,$zero,offs
+      slt(dest, lhs, rhs);
+      return Equal;
+    case LessThan:
+      // blt s,t,label =>
+      //   slt at,s,t
+      //   bne at,$zero,offs
+      slt(dest, lhs, rhs);
+      return NotEqual;
+    case LessThanOrEqual:
+      // ble s,t,label =>
+      //   slt at,t,s
+      //   beq at,$zero,offs
+      slt(dest, rhs, lhs);
+      return Equal;
+    default:
+      MOZ_CRASH("Invalid condition.");
+  }
+  return Always;
+}
+
+// Immediate form of ma_cmp. Where possible it folds the immediate into a
+// single slti/sltiu by rewriting "lhs <= imm" as "lhs < imm + 1"; the
+// imm.value != 0x7fffffff guard avoids signed overflow of imm + 1, and the
+// imm.value != -1 guard avoids the unsigned wrap to 0 (sltu x, 0 is always
+// false). Returns Equal/NotEqual with the same convention as above.
+Assembler::Condition MacroAssemblerRiscv64::ma_cmp(Register dest, Register lhs,
+                                                   Imm32 imm, Condition c) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  MOZ_RELEASE_ASSERT(lhs != scratch);
+
+  switch (c) {
+    case Above:
+    case BelowOrEqual:
+      if (imm.value != 0x7fffffff && is_intn(imm.value + 1, 12) &&
+          imm.value != -1) {
+        // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
+        ma_sltu(dest, lhs, Operand(imm.value + 1));
+
+        return (c == BelowOrEqual ? NotEqual : Equal);
+      } else {
+        ma_li(scratch, imm);
+        sltu(dest, scratch, lhs);
+        return (c == BelowOrEqual ? Equal : NotEqual);
+      }
+    case AboveOrEqual:
+    case Below:
+      if (is_intn(imm.value, 12)) {
+        ma_sltu(dest, lhs, Operand(imm.value));
+      } else {
+        ma_li(scratch, imm);
+        sltu(dest, lhs, scratch);
+      }
+      return (c == AboveOrEqual ? Equal : NotEqual);
+    case GreaterThan:
+    case LessThanOrEqual:
+      if (imm.value != 0x7fffffff && is_intn(imm.value + 1, 12)) {
+        // lhs <= rhs via lhs < rhs + 1.
+        ma_slt(dest, lhs, Operand(imm.value + 1));
+        return (c == LessThanOrEqual ? NotEqual : Equal);
+      } else {
+        ma_li(scratch, imm);
+        slt(dest, scratch, lhs);
+        return (c == LessThanOrEqual ? Equal : NotEqual);
+      }
+    case GreaterThanOrEqual:
+    case LessThan:
+      if (is_intn(imm.value, 12)) {
+        ma_slt(dest, lhs, imm);
+      } else {
+        ma_li(scratch, imm);
+        slt(dest, lhs, scratch);
+      }
+      return (c == GreaterThanOrEqual ? Equal : NotEqual);
+    default:
+      MOZ_CRASH("Invalid condition.");
+  }
+  return Always;
+}
+
+// rd = (rj c rk) as 0/1, expanded from the classic MIPS-style set pseudo-
+// instructions. The Zero/NonZero/Signed/NotSigned forms require rj == rk
+// and test rj against zero.
+void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, Register rk,
+                                       Condition c) {
+  switch (c) {
+    case Equal:
+      // seq d,s,t =>
+      //   xor d,s,t
+      //   sltiu d,d,1
+      xor_(rd, rj, rk);
+      ma_sltu(rd, rd, Operand(1));
+      break;
+    case NotEqual:
+      // sne d,s,t =>
+      //   xor d,s,t
+      //   sltu d,$zero,d
+      xor_(rd, rj, rk);
+      sltu(rd, zero, rd);
+      break;
+    case Above:
+      // sgtu d,s,t =>
+      //   sltu d,t,s
+      sltu(rd, rk, rj);
+      break;
+    case AboveOrEqual:
+      // sgeu d,s,t =>
+      //   sltu d,s,t
+      //   xori d,d,1
+      sltu(rd, rj, rk);
+      xori(rd, rd, 1);
+      break;
+    case Below:
+      // sltu d,s,t
+      sltu(rd, rj, rk);
+      break;
+    case BelowOrEqual:
+      // sleu d,s,t =>
+      //   sltu d,t,s
+      //   xori d,d,1
+      sltu(rd, rk, rj);
+      xori(rd, rd, 1);
+      break;
+    case GreaterThan:
+      // sgt d,s,t =>
+      //   slt d,t,s
+      slt(rd, rk, rj);
+      break;
+    case GreaterThanOrEqual:
+      // sge d,s,t =>
+      //   slt d,s,t
+      //   xori d,d,1
+      slt(rd, rj, rk);
+      xori(rd, rd, 1);
+      break;
+    case LessThan:
+      // slt d,s,t
+      slt(rd, rj, rk);
+      break;
+    case LessThanOrEqual:
+      // sle d,s,t =>
+      //   slt d,t,s
+      //   xori d,d,1
+      slt(rd, rk, rj);
+      xori(rd, rd, 1);
+      break;
+    case Zero:
+      MOZ_ASSERT(rj == rk);
+      // seq d,s,$zero =>
+      //   sltiu d,s,1
+      ma_sltu(rd, rj, Operand(1));
+      break;
+    case NonZero:
+      MOZ_ASSERT(rj == rk);
+      // sne d,s,$zero =>
+      //   sltu d,$zero,s
+      sltu(rd, zero, rj);
+      break;
+    case Signed:
+      MOZ_ASSERT(rj == rk);
+      slt(rd, rj, zero);
+      break;
+    case NotSigned:
+      MOZ_ASSERT(rj == rk);
+      // sge d,s,$zero =>
+      //   slt d,s,$zero
+      //   xori d,d,1
+      slt(rd, rj, zero);
+      xori(rd, rd, 1);
+      break;
+    default:
+      MOZ_CRASH("Invalid condition.");
+  }
+}
+
// Set rd to the boolean result of the single-precision comparison cc of
// cmp1 against cmp2. For the *OrUnordered variants, the trailing fixup ORs
// in "either input is NaN", so they also report true on unordered inputs.
void MacroAssemblerRiscv64::ma_compareF32(Register rd, DoubleCondition cc,
                                          FloatRegister cmp1,
                                          FloatRegister cmp2) {
  switch (cc) {
    case DoubleEqualOrUnordered:
    case DoubleEqual:
      feq_s(rd, cmp1, cmp2);
      break;
    case DoubleNotEqualOrUnordered:
    case DoubleNotEqual: {
      Label done;
      // If either input is NaN, rd is 1 here; skip feq so that NegateBool
      // yields 0 (the unordered fixup below re-adds 1 for the
      // DoubleNotEqualOrUnordered variant).
      CompareIsNanF32(rd, cmp1, cmp2);
      ma_branch(&done, Equal, rd, Operand(1));
      feq_s(rd, cmp1, cmp2);
      bind(&done);
      NegateBool(rd, rd);
      break;
    }
    case DoubleLessThanOrUnordered:
    case DoubleLessThan:
      flt_s(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrEqualOrUnordered:
    case DoubleGreaterThanOrEqual:
      // a >= b via b <= a.
      fle_s(rd, cmp2, cmp1);
      break;
    case DoubleLessThanOrEqualOrUnordered:
    case DoubleLessThanOrEqual:
      fle_s(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrUnordered:
    case DoubleGreaterThan:
      // a > b via b < a.
      flt_s(rd, cmp2, cmp1);
      break;
    case DoubleOrdered:
      CompareIsNotNanF32(rd, cmp1, cmp2);
      return;
    case DoubleUnordered:
      CompareIsNanF32(rd, cmp1, cmp2);
      return;
  }
  // Unordered variants: also true when either operand is NaN.
  if (cc >= FIRST_UNORDERED && cc <= LAST_UNORDERED) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    CompareIsNanF32(scratch, cmp1, cmp2);
    or_(rd, rd, scratch);
  }
}
+
// Double-precision counterpart of ma_compareF32: set rd to the boolean
// result of comparing cmp1 against cmp2 under cc, with the *OrUnordered
// variants also reporting true when either input is NaN.
void MacroAssemblerRiscv64::ma_compareF64(Register rd, DoubleCondition cc,
                                          FloatRegister cmp1,
                                          FloatRegister cmp2) {
  switch (cc) {
    case DoubleEqualOrUnordered:
    case DoubleEqual:
      feq_d(rd, cmp1, cmp2);
      break;
    case DoubleNotEqualOrUnordered:
    case DoubleNotEqual: {
      Label done;
      // NaN input: rd is 1, skip feq so NegateBool yields 0; the unordered
      // fixup below restores 1 for DoubleNotEqualOrUnordered.
      CompareIsNanF64(rd, cmp1, cmp2);
      ma_branch(&done, Equal, rd, Operand(1));
      feq_d(rd, cmp1, cmp2);
      bind(&done);
      NegateBool(rd, rd);
    } break;
    case DoubleLessThanOrUnordered:
    case DoubleLessThan:
      flt_d(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrEqualOrUnordered:
    case DoubleGreaterThanOrEqual:
      // a >= b via b <= a.
      fle_d(rd, cmp2, cmp1);
      break;
    case DoubleLessThanOrEqualOrUnordered:
    case DoubleLessThanOrEqual:
      fle_d(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrUnordered:
    case DoubleGreaterThan:
      // a > b via b < a.
      flt_d(rd, cmp2, cmp1);
      break;
    case DoubleOrdered:
      CompareIsNotNanF64(rd, cmp1, cmp2);
      return;
    case DoubleUnordered:
      CompareIsNanF64(rd, cmp1, cmp2);
      return;
  }

  // Unordered variants: also true when either operand is NaN.
  if (cc >= FIRST_UNORDERED && cc <= LAST_UNORDERED) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    CompareIsNanF64(scratch, cmp1, cmp2);
    or_(rd, rd, scratch);
  }
}
+
// Register-to-register pointer move.
void MacroAssemblerRiscv64Compat::movePtr(Register src, Register dest) {
  mv(dest, src);
}
// Materialize a 64-bit immediate pointer value.
void MacroAssemblerRiscv64Compat::movePtr(ImmWord imm, Register dest) {
  ma_li(dest, imm);
}

// Materialize a GC-thing pointer immediate.
void MacroAssemblerRiscv64Compat::movePtr(ImmGCPtr imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerRiscv64Compat::movePtr(ImmPtr imm, Register dest) {
  movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
// Load a wasm symbolic address via a fixed-length patchable Li64 sequence;
// the SymbolicAccess record marks the sequence so the real address can be
// patched in later. The trampoline-pool block keeps the sequence contiguous.
void MacroAssemblerRiscv64Compat::movePtr(wasm::SymbolicAddress imm,
                                          Register dest) {
  DEBUG_PRINTF("[ %s\n", __FUNCTION__);
  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
  append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
  ma_liPatchable(dest, ImmWord(-1), Li64);
  DEBUG_PRINTF("]\n");
}
+
// Build a fake exit frame for out-of-line code: push an IonJS frame
// descriptor, the fake return address, and the current frame pointer.
bool MacroAssemblerRiscv64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
  asMasm().PushFrameDescriptor(FrameType::IonJS);  // descriptor_
  asMasm().Push(ImmPtr(fakeReturnAddr));
  asMasm().Push(FramePointer);
  return true;
}
+
// Unsigned 32-bit integer -> double.
void MacroAssemblerRiscv64Compat::convertUInt32ToDouble(Register src,
                                                        FloatRegister dest) {
  fcvt_d_wu(dest, src);
}

// Unsigned 64-bit integer -> double.
void MacroAssemblerRiscv64Compat::convertUInt64ToDouble(Register src,
                                                        FloatRegister dest) {
  fcvt_d_lu(dest, src);
}

// Unsigned 32-bit integer -> float32.
void MacroAssemblerRiscv64Compat::convertUInt32ToFloat32(Register src,
                                                         FloatRegister dest) {
  fcvt_s_wu(dest, src);
}

// Double -> float32 (narrowing).
void MacroAssemblerRiscv64Compat::convertDoubleToFloat32(FloatRegister src,
                                                         FloatRegister dest) {
  fcvt_s_d(dest, src);
}
+
// Round src to an integral floating-point value in dst under rounding mode
// frm (F selects float vs double). Handles NaN canonicalization and signed
// zeros per JS/wasm semantics. fpu_scratch preserves the input when dst
// aliases src; at least two distinct FPRs are required among the three.
template <typename F>
void MacroAssemblerRiscv64::RoundHelper(FPURegister dst, FPURegister src,
                                        FPURegister fpu_scratch,
                                        FPURoundingMode frm) {
  BlockTrampolinePoolScope block_trampoline_pool(this, 20);
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.Acquire();

  MOZ_ASSERT((std::is_same<float, F>::value) ||
             (std::is_same<double, F>::value));
  // Need at least two FPRs, so check against dst == src == fpu_scratch
  MOZ_ASSERT(!(dst == src && dst == fpu_scratch));

  const int kFloatMantissaBits =
      sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
  const int kFloatExponentBits =
      sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
  const int kFloatExponentBias =
      sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
  Label done;

  {
    UseScratchRegisterScope temps2(this);
    Register scratch = temps2.Acquire();
    // extract exponent value of the source floating-point to scratch
    if (std::is_same<F, double>::value) {
      fmv_x_d(scratch, src);
    } else {
      fmv_x_w(scratch, src);
    }
    ExtractBits(scratch2, scratch, kFloatMantissaBits, kFloatExponentBits);
  }

  // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
  // in mantissa, the result is the same as src, so move src to dest  (to avoid
  // generating another branch)
  if (dst != src) {
    if (std::is_same<F, double>::value) {
      fmv_d(dst, src);
    } else {
      fmv_s(dst, src);
    }
  }
  {
    Label not_NaN;
    UseScratchRegisterScope temps2(this);
    Register scratch = temps2.Acquire();
    // According to the wasm spec
    // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans)
    // if input is canonical NaN, then output is canonical NaN, and if input is
    // any other NaN, then output is any NaN with most significant bit of
    // payload is 1. In RISC-V, feq_d will set scratch to 0 if src is a NaN. If
    // src is not a NaN, branch to the label and do nothing, but if it is,
    // fmin_d will set dst to the canonical NaN.
    if (std::is_same<F, double>::value) {
      feq_d(scratch, src, src);
      bnez(scratch, &not_NaN);
      fmin_d(dst, src, src);
    } else {
      feq_s(scratch, src, src);
      bnez(scratch, &not_NaN);
      fmin_s(dst, src, src);
    }
    bind(&not_NaN);
  }

  // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than
  // kFloat32MantissaBits, it means the floating-point value has no fractional
  // part, thus the input is already rounded, jump to done. Note that, NaN and
  // Infinity in floating-point representation sets maximal exponent value, so
  // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
  // and JS round semantics specify that rounding of NaN (Infinity) returns NaN
  // (Infinity), so NaN and Infinity are considered rounded value too.
  ma_branch(&done, GreaterThanOrEqual, scratch2,
            Operand(kFloatExponentBias + kFloatMantissaBits));

  // Actual rounding is needed along this path

  // old_src holds the original input, needed for the case of src == dst
  FPURegister old_src = src;
  if (src == dst) {
    MOZ_ASSERT(fpu_scratch != dst);
    fmv_d(fpu_scratch, src);
    old_src = fpu_scratch;
  }

  // Since only input whose real exponent value is less than kMantissaBits
  // (i.e., 23 or 52-bits) falls into this path, the value range of the input
  // falls into that of 23- or 53-bit integers. So we round the input to integer
  // values, then convert them back to floating-point.
  {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    if (std::is_same<F, double>::value) {
      fcvt_l_d(scratch, src, frm);
      fcvt_d_l(dst, scratch, frm);
    } else {
      fcvt_w_s(scratch, src, frm);
      fcvt_s_w(dst, scratch, frm);
    }
  }
  // A special handling is needed if the input is a very small positive/negative
  // number that rounds to zero. JS semantics requires that the rounded result
  // retains the sign of the input, so a very small positive (negative)
  // floating-point number should be rounded to positive (negative) 0.
  // Therefore, we use sign-bit injection to produce +/-0 correctly. Instead of
  // testing for zero w/ a branch, we just insert sign-bit for everyone on this
  // path (this is where old_src is needed)
  if (std::is_same<F, double>::value) {
    fsgnj_d(dst, dst, old_src);
  } else {
    fsgnj_s(dst, dst, old_src);
  }

  bind(&done);
}
+
// Run fcvt_generator to convert fs to an integer in rd. When `result` is a
// valid register, the FP exception flags are cleared first and `result` is
// set to 1 for a clean conversion or 0 when kInvalidOperation (and kInexact,
// if Inexact is true) was raised; the previous fflags are then restored.
template <typename CvtFunc>
void MacroAssemblerRiscv64::RoundFloatingPointToInteger(Register rd,
                                                        FPURegister fs,
                                                        Register result,
                                                        CvtFunc fcvt_generator,
                                                        bool Inexact) {
  // Save csr_fflags to scratch & clear exception flags
  if (result != Register::Invalid()) {
    BlockTrampolinePoolScope block_trampoline_pool(this, 6);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();

    int exception_flags = kInvalidOperation;
    if (Inexact) exception_flags |= kInexact;
    // csrrci both reads the old flags (into scratch) and clears the watched
    // bits in one instruction.
    csrrci(scratch, csr_fflags, exception_flags);

    // actual conversion instruction
    fcvt_generator(this, rd, fs);

    // check kInvalidOperation flag (out-of-range, NaN)
    // set result to 1 if normal, otherwise set result to 0 for abnormal
    frflags(result);
    andi(result, result, exception_flags);
    seqz(result, result);  // result <-- 1 (normal), result <-- 0 (abnormal)

    // restore csr_fflags
    csrw(csr_fflags, scratch);
  } else {
    // actual conversion instruction
    fcvt_generator(this, rd, fs);
  }
}
+
// Truncating (round-toward-zero) float->integer conversions. Each wrapper
// picks the fcvt variant for the source width (s/d) and the destination kind
// (w/wu/l/lu). When `result` is valid it receives 1 on a clean conversion
// and 0 when kInvalidOperation (plus kInexact, if requested) was raised.
void MacroAssemblerRiscv64::Trunc_uw_d(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_wu_d(dst, src, RTZ);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Trunc_w_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RTZ);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Trunc_uw_s(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_wu_s(dst, src, RTZ);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Trunc_w_s(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_ul_d(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_lu_d(dst, src, RTZ);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Trunc_l_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_l_d(dst, src, RTZ);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Trunc_ul_s(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_lu_s(dst, src, RTZ);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Trunc_l_s(Register rd, FPURegister fs,
                                     Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_l_s(dst, src, RTZ);
      },
      Inexact);
}
+
// Round a double/float to an integral value with a fixed rounding mode:
// RDN = floor, RUP = ceil, RTZ = trunc, RNE = round-to-nearest-even.
void MacroAssemblerRiscv64::Floor_d_d(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RDN);
}

void MacroAssemblerRiscv64::Ceil_d_d(FPURegister dst, FPURegister src,
                                     FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RUP);
}

void MacroAssemblerRiscv64::Trunc_d_d(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RTZ);
}

void MacroAssemblerRiscv64::Round_d_d(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RNE);
}

void MacroAssemblerRiscv64::Floor_s_s(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RDN);
}

void MacroAssemblerRiscv64::Ceil_s_s(FPURegister dst, FPURegister src,
                                     FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RUP);
}

void MacroAssemblerRiscv64::Trunc_s_s(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RTZ);
}

void MacroAssemblerRiscv64::Round_s_s(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RNE);
}
+
// Float->int32 conversions under explicit rounding modes (RNE = nearest,
// RUP = ceil, RDN = floor); `result` reports conversion validity as in
// RoundFloatingPointToInteger.
void MacroAssemblerRiscv64::Round_w_s(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RNE);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Round_w_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RNE);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Ceil_w_s(Register rd, FPURegister fs,
                                     Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RUP);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Ceil_w_d(Register rd, FPURegister fs,
                                     Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RUP);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Floor_w_s(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RDN);
      },
      Inexact);
}

void MacroAssemblerRiscv64::Floor_w_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RDN);
      },
      Inexact);
}
+
// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerRiscv64Compat::convertDoubleToInt32(FloatRegister src,
                                                       Register dest,
                                                       Label* fail,
                                                       bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // fclass_d reports exactly kNegativeZero for -0.0, which would otherwise
    // truncate to 0 and silently lose its sign.
    fclass_d(dest, src);
    ma_b(dest, Imm32(kNegativeZero), fail, Equal);
  }
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  // Inexact=true: scratch is cleared (-> bail) on NaN, overflow, or any
  // fractional part.
  Trunc_w_d(dest, src, scratch, true);
  ma_b(scratch, Imm32(0), fail, Equal);
}
+
// Like convertDoubleToInt32, but truncates to a 64-bit (pointer-sized)
// integer; bails out on NaN, overflow, inexact result, or (optionally) -0.
void MacroAssemblerRiscv64Compat::convertDoubleToPtr(FloatRegister src,
                                                     Register dest, Label* fail,
                                                     bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    fclass_d(dest, src);
    ma_b(dest, Imm32(kNegativeZero), fail, Equal);
  }
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  // Inexact=true: scratch is cleared (-> bail) on any imprecise conversion.
  Trunc_l_d(dest, src, scratch, true);
  ma_b(scratch, Imm32(0), fail, Equal);
}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void MacroAssemblerRiscv64Compat::convertFloat32ToInt32(
+ FloatRegister src, Register dest, Label* fail, bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ fclass_d(dest, src);
+ ma_b(dest, Imm32(kNegativeZero), fail, Equal);
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Trunc_w_s(dest, src, scratch, true);
+ ma_b(scratch, Imm32(0), fail, Equal);
+}
+
// Float32 -> double (widening).
void MacroAssemblerRiscv64Compat::convertFloat32ToDouble(FloatRegister src,
                                                         FloatRegister dest) {
  fcvt_d_s(dest, src);
}

// Signed 32-bit integer -> float32.
void MacroAssemblerRiscv64Compat::convertInt32ToFloat32(Register src,
                                                        FloatRegister dest) {
  fcvt_s_w(dest, src);
}

// Signed 32-bit integer (in memory) -> float32, via a scratch register.
void MacroAssemblerRiscv64Compat::convertInt32ToFloat32(const Address& src,
                                                        FloatRegister dest) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  load32(src, scratch);
  fcvt_s_w(dest, scratch);
}

// 64-bit register move (note the reversed src/dest argument order).
void MacroAssemblerRiscv64Compat::movq(Register rj, Register rd) { mv(rd, rj); }
+
+// Memory.
+void MacroAssemblerRiscv64::ma_loadDouble(FloatRegister dest, Address address) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+ fld(dest, base, encodedOffset);
+}
+
+void MacroAssemblerRiscv64::ma_loadFloat(FloatRegister dest, Address address) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+ flw(dest, base, encodedOffset);
+}
+
+void MacroAssemblerRiscv64::ma_load(Register dest, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension) {
+ lbu(dest, base, encodedOffset);
+ } else {
+ lb(dest, base, encodedOffset);
+ }
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension) {
+ lhu(dest, base, encodedOffset);
+ } else {
+ lh(dest, base, encodedOffset);
+ }
+ break;
+ case SizeWord:
+ if (ZeroExtend == extension) {
+ lwu(dest, base, encodedOffset);
+ } else {
+ lw(dest, base, encodedOffset);
+ }
+ break;
+ case SizeDouble:
+ ld(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
// Store `data` to a base+index destination: compute the scaled address into
// a scratch register, then defer to the Address-based overload.
void MacroAssemblerRiscv64::ma_store(Register data, const BaseIndex& dest,
                                     LoadStoreSize size,
                                     LoadStoreExtension extension) {
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.Acquire();
  asMasm().computeScaledAddress(dest, scratch2);
  asMasm().ma_store(data, Address(scratch2, dest.offset), size, extension);
}

// Store an immediate to a base+index destination.
void MacroAssemblerRiscv64::ma_store(Imm32 imm, const BaseIndex& dest,
                                     LoadStoreSize size,
                                     LoadStoreExtension extension) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Register address = temps.Acquire();
  // Resolve the full destination address up front so the final store uses a
  // zero offset.
  computeScaledAddress(dest, address);

  // The scratch register is free now; use it to materialize the immediate.
  ma_li(scratch, imm);

  // With offset=0 the inner ma_store() needs no extra scratch register, so
  // passing one as the data register is safe.
  ma_store(scratch, Address(address, 0), size, extension);
}

// Store an immediate to an Address destination via a scratch register.
void MacroAssemblerRiscv64::ma_store(Imm32 imm, Address address,
                                     LoadStoreSize size,
                                     LoadStoreExtension extension) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  ma_li(scratch, imm);
  ma_store(scratch, address, size, extension);
}
+
+void MacroAssemblerRiscv64::ma_store(Register data, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (!is_int12(address.offset)) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, Imm32(address.offset));
+ add(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ sw(data, base, encodedOffset);
+ break;
+ case SizeDouble:
+ sd(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
// Memory.
// Store the double in `dest` (despite the name, this is the SOURCE of the
// store) to `address`, folding an out-of-range displacement into the base.
void MacroAssemblerRiscv64::ma_storeDouble(FloatRegister dest,
                                           Address address) {
  int16_t encodedOffset;
  Register base;

  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  fsd(dest, base, encodedOffset);
}

// Store the float32 in `dest` (the SOURCE of the store) to `address`.
void MacroAssemblerRiscv64::ma_storeFloat(FloatRegister dest, Address address) {
  int16_t encodedOffset;
  Register base;

  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  fsw(dest, base, encodedOffset);
}
+
// dest <- base + (index << scale). When dest aliases base, the shifted index
// is built in a scratch register so base is not clobbered before the add.
void MacroAssemblerRiscv64::computeScaledAddress(const BaseIndex& address,
                                                 Register dest) {
  Register base = address.base;
  Register index = address.index;
  int32_t shift = Imm32::ShiftOf(address.scale).value;
  UseScratchRegisterScope temps(this);
  Register tmp = dest == base ? temps.Acquire() : dest;
  if (shift) {
    MOZ_ASSERT(shift <= 4);
    slli(tmp, index, shift);
    add(dest, base, tmp);
  } else {
    add(dest, base, index);
  }
}
+
// Wasm load into a 64-bit output: add memoryBase+ptr, then emit the load for
// the access type (narrow types sign- or zero-extend into the full
// register). The access is recorded against the just-emitted 4-byte load
// instruction for wasm memory-access metadata.
void MacroAssemblerRiscv64Compat::wasmLoadI64Impl(
    const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
    Register ptrScratch, Register64 output, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }

  asMasm().memoryBarrierBefore(access.sync());
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  switch (access.type()) {
    case Scalar::Int8:
      add(ScratchRegister, memoryBase, ptr);
      lb(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Uint8:
      add(ScratchRegister, memoryBase, ptr);
      lbu(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Int16:
      add(ScratchRegister, memoryBase, ptr);
      lh(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Uint16:
      add(ScratchRegister, memoryBase, ptr);
      lhu(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Int32:
      add(ScratchRegister, memoryBase, ptr);
      lw(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Uint32:
      // TODO(loong64): Why need zero-extension here?
      add(ScratchRegister, memoryBase, ptr);
      lwu(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Int64:
      add(ScratchRegister, memoryBase, ptr);
      ld(output.reg, ScratchRegister, 0);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  // Point the metadata at the load instruction itself (4 bytes back).
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
+
// Wasm store of a 64-bit value: add memoryBase+ptr, then emit the store for
// the access type (narrow types store the low bits). The access is recorded
// against the just-emitted 4-byte store instruction.
void MacroAssemblerRiscv64Compat::wasmStoreI64Impl(
    const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
    Register ptr, Register ptrScratch, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }

  asMasm().memoryBarrierBefore(access.sync());
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  switch (access.type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
      add(ScratchRegister, memoryBase, ptr);
      sb(value.reg, ScratchRegister, 0);
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
      add(ScratchRegister, memoryBase, ptr);
      sh(value.reg, ScratchRegister, 0);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      add(ScratchRegister, memoryBase, ptr);
      sw(value.reg, ScratchRegister, 0);
      break;
    case Scalar::Int64:
      add(ScratchRegister, memoryBase, ptr);
      sd(value.reg, ScratchRegister, 0);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  // Point the metadata at the store instruction itself (4 bytes back).
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
+
// Record framePtr as the last profiling frame on the current JitActivation
// and clear the last profiling call site.
void MacroAssemblerRiscv64Compat::profilerEnterFrame(Register framePtr,
                                                     Register scratch) {
  asMasm().loadJSContext(scratch);
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}

// Tail-jump to the shared profiler exit-frame trampoline.
void MacroAssemblerRiscv64Compat::profilerExitFrame() {
  jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
}
+
// Materialize a 32-bit immediate.
void MacroAssemblerRiscv64Compat::move32(Imm32 imm, Register dest) {
  ma_li(dest, imm);
}

// slliw with shamt 0 sign-extends the low 32 bits, yielding the canonical
// RV64 representation of a 32-bit value.
void MacroAssemblerRiscv64Compat::move32(Register src, Register dest) {
  slliw(dest, src, 0);
}
+
// 8- and 16-bit load wrappers: delegate to ma_load with the matching size
// and extension mode for Address and BaseIndex operands.
void MacroAssemblerRiscv64Compat::load8ZeroExtend(const Address& address,
                                                  Register dest) {
  ma_load(dest, address, SizeByte, ZeroExtend);
}

void MacroAssemblerRiscv64Compat::load8ZeroExtend(const BaseIndex& src,
                                                  Register dest) {
  ma_load(dest, src, SizeByte, ZeroExtend);
}

void MacroAssemblerRiscv64Compat::load8SignExtend(const Address& address,
                                                  Register dest) {
  ma_load(dest, address, SizeByte, SignExtend);
}

void MacroAssemblerRiscv64Compat::load8SignExtend(const BaseIndex& src,
                                                  Register dest) {
  ma_load(dest, src, SizeByte, SignExtend);
}

void MacroAssemblerRiscv64Compat::load16ZeroExtend(const Address& address,
                                                   Register dest) {
  ma_load(dest, address, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerRiscv64Compat::load16ZeroExtend(const BaseIndex& src,
                                                   Register dest) {
  ma_load(dest, src, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerRiscv64Compat::load16SignExtend(const Address& address,
                                                   Register dest) {
  ma_load(dest, address, SizeHalfWord, SignExtend);
}

void MacroAssemblerRiscv64Compat::load16SignExtend(const BaseIndex& src,
                                                   Register dest) {
  ma_load(dest, src, SizeHalfWord, SignExtend);
}
+
// 32-bit load wrappers; absolute and symbolic addresses are resolved through
// a scratch register first.
void MacroAssemblerRiscv64Compat::load32(const Address& address,
                                         Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerRiscv64Compat::load32(const BaseIndex& address,
                                         Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerRiscv64Compat::load32(AbsoluteAddress address,
                                         Register dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(ImmPtr(address.addr), ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerRiscv64Compat::load32(wasm::SymbolicAddress address,
                                         Register dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(address, ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}
+
// Pointer-width (64-bit) load wrappers; absolute and symbolic addresses are
// resolved through a scratch register first.
void MacroAssemblerRiscv64Compat::loadPtr(const Address& address,
                                          Register dest) {
  ma_load(dest, address, SizeDouble);
}

void MacroAssemblerRiscv64Compat::loadPtr(const BaseIndex& src, Register dest) {
  ma_load(dest, src, SizeDouble);
}

void MacroAssemblerRiscv64Compat::loadPtr(AbsoluteAddress address,
                                          Register dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(ImmPtr(address.addr), ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerRiscv64Compat::loadPtr(wasm::SymbolicAddress address,
                                          Register dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(address, ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

// A private value is stored as a plain pointer-width word.
void MacroAssemblerRiscv64Compat::loadPrivate(const Address& address,
                                              Register dest) {
  loadPtr(address, dest);
}
+
// 8-bit store wrappers; immediates are materialized into a scratch register.
void MacroAssemblerRiscv64Compat::store8(Imm32 imm, const Address& address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  ma_li(ScratchRegister, imm);
  ma_store(ScratchRegister, address, SizeByte);
}

void MacroAssemblerRiscv64Compat::store8(Register src, const Address& address) {
  ma_store(src, address, SizeByte);
}

void MacroAssemblerRiscv64Compat::store8(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeByte);
}

void MacroAssemblerRiscv64Compat::store8(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeByte);
}
+
// 16-bit store wrappers; immediates are materialized into a scratch register.
void MacroAssemblerRiscv64Compat::store16(Imm32 imm, const Address& address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  ma_li(ScratchRegister, imm);
  ma_store(ScratchRegister, address, SizeHalfWord);
}

void MacroAssemblerRiscv64Compat::store16(Register src,
                                          const Address& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerRiscv64Compat::store16(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeHalfWord);
}

void MacroAssemblerRiscv64Compat::store16(Register src,
                                          const BaseIndex& address) {
  ma_store(src, address, SizeHalfWord);
}
+
// 32-bit store wrappers; absolute addresses and immediates go through a
// scratch register.
void MacroAssemblerRiscv64Compat::store32(Register src,
                                          AbsoluteAddress address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(ImmPtr(address.addr), ScratchRegister);
  store32(src, Address(ScratchRegister, 0));
}

void MacroAssemblerRiscv64Compat::store32(Register src,
                                          const Address& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerRiscv64Compat::store32(Imm32 src, const Address& address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  move32(src, ScratchRegister);
  ma_store(ScratchRegister, address, SizeWord);
}

void MacroAssemblerRiscv64Compat::store32(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeWord);
}

void MacroAssemblerRiscv64Compat::store32(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeWord);
}
+
// Pointer-width store of an immediate word; T is Address or BaseIndex (the
// explicit instantiations below provide both).
template <typename T>
void MacroAssemblerRiscv64Compat::storePtr(ImmWord imm, T address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  ma_li(ScratchRegister, imm);
  ma_store(ScratchRegister, address, SizeDouble);
}

template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmWord imm,
                                                             Address address);
template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
    ImmWord imm, BaseIndex address);

// Pointer immediates are stored as raw words.
template <typename T>
void MacroAssemblerRiscv64Compat::storePtr(ImmPtr imm, T address) {
  storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmPtr imm,
                                                             Address address);
template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
    ImmPtr imm, BaseIndex address);

// GC-pointer immediates are materialized via movePtr before storing.
template <typename T>
void MacroAssemblerRiscv64Compat::storePtr(ImmGCPtr imm, T address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(imm, ScratchRegister);
  storePtr(ScratchRegister, address);
}

template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmGCPtr imm,
                                                             Address address);
template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
    ImmGCPtr imm, BaseIndex address);

void MacroAssemblerRiscv64Compat::storePtr(Register src,
                                           const Address& address) {
  ma_store(src, address, SizeDouble);
}

void MacroAssemblerRiscv64Compat::storePtr(Register src,
                                           const BaseIndex& address) {
  ma_store(src, address, SizeDouble);
}

void MacroAssemblerRiscv64Compat::storePtr(Register src, AbsoluteAddress dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(ImmPtr(dest.addr), ScratchRegister);
  storePtr(src, Address(ScratchRegister, 0));
}
+
+// Set |dest| to the boolean result of comparing |value|'s tag against
+// JSVAL_TAG_NULL under |cond| (Equal/NotEqual only).
+void MacroAssemblerRiscv64Compat::testNullSet(Condition cond,
+                                              const ValueOperand& value,
+                                              Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  splitTag(value, ScratchRegister);
+  ma_cmp_set(dest, ScratchRegister, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+// Same as testNullSet, but against JSVAL_TAG_OBJECT.
+void MacroAssemblerRiscv64Compat::testObjectSet(Condition cond,
+                                                const ValueOperand& value,
+                                                Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  splitTag(value, ScratchRegister);
+  ma_cmp_set(dest, ScratchRegister, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+// Same as testNullSet, but against JSVAL_TAG_UNDEFINED.
+void MacroAssemblerRiscv64Compat::testUndefinedSet(Condition cond,
+                                                   const ValueOperand& value,
+                                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  splitTag(value, ScratchRegister);
+  ma_cmp_set(dest, ScratchRegister, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
+// Extract the int32 payload from a boxed Value.
+// slliw by 0 is a 32-bit operation, so it sign-extends the low 32 bits
+// into the full 64-bit destination, discarding the tag.
+void MacroAssemblerRiscv64Compat::unboxInt32(const ValueOperand& operand,
+                                             Register dest) {
+  slliw(dest, operand.valueReg(), 0);
+}
+
+void MacroAssemblerRiscv64Compat::unboxInt32(Register src, Register dest) {
+  slliw(dest, src, 0);
+}
+
+// Load only the low 32 bits of the boxed Value in memory.
+void MacroAssemblerRiscv64Compat::unboxInt32(const Address& src,
+                                             Register dest) {
+  load32(Address(src.base, src.offset), dest);
+}
+
+void MacroAssemblerRiscv64Compat::unboxInt32(const BaseIndex& src,
+                                             Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  computeScaledAddress(src, ScratchRegister);
+  load32(Address(ScratchRegister, src.offset), dest);
+}
+
+// Extract the boolean payload: zero-extend the low 32 bits (unlike int32,
+// which sign-extends).
+void MacroAssemblerRiscv64Compat::unboxBoolean(const ValueOperand& operand,
+                                               Register dest) {
+  ExtractBits(dest, operand.valueReg(), 0, 32);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBoolean(Register src, Register dest) {
+  ExtractBits(dest, src, 0, 32);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBoolean(const Address& src,
+                                               Register dest) {
+  ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBoolean(const BaseIndex& src,
+                                               Register dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  computeScaledAddress(src, ScratchRegister);
+  ma_load(dest, Address(ScratchRegister, src.offset), SizeWord, ZeroExtend);
+}
+
+// Doubles are stored unboxed in the Value, so moving the raw bits into an
+// FP register is sufficient.
+void MacroAssemblerRiscv64Compat::unboxDouble(const ValueOperand& operand,
+                                              FloatRegister dest) {
+  fmv_d_x(dest, operand.valueReg());
+}
+
+void MacroAssemblerRiscv64Compat::unboxDouble(const Address& src,
+                                              FloatRegister dest) {
+  ma_loadDouble(dest, Address(src.base, src.offset));
+}
+
+void MacroAssemblerRiscv64Compat::unboxDouble(const BaseIndex& src,
+                                              FloatRegister dest) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  loadPtr(src, scratch);
+  unboxDouble(ValueOperand(scratch), dest);
+}
+
+// The following unboxers all delegate to unboxNonDouble with the expected
+// JSValueType; unboxNonDouble strips the tag from the boxed representation.
+void MacroAssemblerRiscv64Compat::unboxString(const ValueOperand& operand,
+                                              Register dest) {
+  unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerRiscv64Compat::unboxString(Register src, Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerRiscv64Compat::unboxString(const Address& src,
+                                              Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerRiscv64Compat::unboxSymbol(const ValueOperand& operand,
+                                              Register dest) {
+  unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerRiscv64Compat::unboxSymbol(Register src, Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerRiscv64Compat::unboxSymbol(const Address& src,
+                                              Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBigInt(const ValueOperand& operand,
+                                              Register dest) {
+  unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBigInt(Register src, Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxBigInt(const Address& src,
+                                              Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxObject(const ValueOperand& src,
+                                              Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxObject(Register src, Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerRiscv64Compat::unboxObject(const Address& src,
+                                              Register dest) {
+  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+// Unbox into either an FP or GP register. For an FP destination the value
+// may be an int32 (convert it) or a double (reinterpret the bits); for a GP
+// destination the caller-supplied |type| selects the non-double unboxing.
+void MacroAssemblerRiscv64Compat::unboxValue(const ValueOperand& src,
+                                             AnyRegister dest,
+                                             JSValueType type) {
+  if (dest.isFloat()) {
+    Label notInt32, end;
+    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+    convertInt32ToDouble(src.valueReg(), dest.fpu());
+    ma_branch(&end);
+    bind(&notInt32);
+    unboxDouble(src, dest.fpu());
+    bind(&end);
+  } else {
+    unboxNonDouble(src, dest.gpr(), type);
+  }
+}
+
+// Box a double: the raw IEEE-754 bits become the Value. The third
+// FloatRegister parameter is an unused scratch required by the shared
+// interface.
+void MacroAssemblerRiscv64Compat::boxDouble(FloatRegister src,
+                                            const ValueOperand& dest,
+                                            FloatRegister) {
+  fmv_x_d(dest.valueReg(), src);
+}
+
+// Box a non-double payload with the tag for |type|.
+void MacroAssemblerRiscv64Compat::boxNonDouble(JSValueType type, Register src,
+                                               const ValueOperand& dest) {
+  MOZ_ASSERT(src != dest.valueReg());
+  boxValue(type, src, dest.valueReg());
+}
+
+// Convert a boxed boolean to a double via an int32 intermediate.
+void MacroAssemblerRiscv64Compat::boolValueToDouble(const ValueOperand& operand,
+                                                    FloatRegister dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  convertBoolToInt32(operand.valueReg(), ScratchRegister);
+  convertInt32ToDouble(ScratchRegister, dest);
+}
+
+// Convert a boxed int32 to a double. The conversion reads only the low 32
+// bits, so the tag does not need to be stripped first.
+void MacroAssemblerRiscv64Compat::int32ValueToDouble(
+    const ValueOperand& operand, FloatRegister dest) {
+  convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+// Convert a boxed boolean to a float32 via an int32 intermediate.
+void MacroAssemblerRiscv64Compat::boolValueToFloat32(
+    const ValueOperand& operand, FloatRegister dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  convertBoolToInt32(operand.valueReg(), ScratchRegister);
+  convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+// Convert a boxed int32 to a float32.
+void MacroAssemblerRiscv64Compat::int32ValueToFloat32(
+    const ValueOperand& operand, FloatRegister dest) {
+  convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+// Materialize a float32 constant in |dest|.
+void MacroAssemblerRiscv64Compat::loadConstantFloat32(float f,
+                                                      FloatRegister dest) {
+  ma_lis(dest, f);
+}
+
+// Load a Value that is known to be either an int32 or a double, producing a
+// double in |dest| (int32s are converted).
+void MacroAssemblerRiscv64Compat::loadInt32OrDouble(const Address& src,
+                                                    FloatRegister dest) {
+  Label notInt32, end;
+  // If it's an int, convert it to double.
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Register SecondScratchReg = temps.Acquire();
+  loadPtr(Address(src.base, src.offset), ScratchRegister);
+  srli(SecondScratchReg, ScratchRegister, JSVAL_TAG_SHIFT);
+  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+  loadPtr(Address(src.base, src.offset), SecondScratchReg);
+  convertInt32ToDouble(SecondScratchReg, dest);
+  ma_branch(&end);
+
+  // Not an int, just load as double.
+  bind(&notInt32);
+  unboxDouble(src, dest);
+  bind(&end);
+}
+
+// BaseIndex variant of the above. The scaled address is recomputed rather
+// than kept live, because the scratch register holding it is reused for the
+// tag and payload along the way.
+void MacroAssemblerRiscv64Compat::loadInt32OrDouble(const BaseIndex& addr,
+                                                    FloatRegister dest) {
+  Label notInt32, end;
+
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Register SecondScratchReg = temps.Acquire();
+  // If it's an int, convert it to double.
+  computeScaledAddress(addr, SecondScratchReg);
+  // Since we only have one scratch, we need to stomp over it with the tag.
+  loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
+  srli(SecondScratchReg, ScratchRegister, JSVAL_TAG_SHIFT);
+  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+  computeScaledAddress(addr, SecondScratchReg);
+  loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
+  convertInt32ToDouble(SecondScratchReg, dest);
+  ma_branch(&end);
+
+  // Not an int, just load as double.
+  bind(&notInt32);
+  // First, recompute the offset that had been stored in the scratch register
+  // since the scratch register was overwritten loading in the type.
+  computeScaledAddress(addr, SecondScratchReg);
+  unboxDouble(Address(SecondScratchReg, 0), dest);
+  bind(&end);
+}
+
+// Materialize a double constant in |dest|.
+void MacroAssemblerRiscv64Compat::loadConstantDouble(double dp,
+                                                     FloatRegister dest) {
+  ma_lid(dest, dp);
+}
+
+// Load a boxed Value and strip the tag, leaving the object pointer (the low
+// JSVAL_TAG_SHIFT bits) in |scratch|.
+Register MacroAssemblerRiscv64Compat::extractObject(const Address& address,
+                                                    Register scratch) {
+  loadPtr(Address(address.base, address.offset), scratch);
+  ExtractBits(scratch, scratch, 0, JSVAL_TAG_SHIFT);
+  return scratch;
+}
+
+// Load a boxed Value and keep only the tag (the high bits above
+// JSVAL_TAG_SHIFT) in |scratch|.
+Register MacroAssemblerRiscv64Compat::extractTag(const Address& address,
+                                                 Register scratch) {
+  loadPtr(Address(address.base, address.offset), scratch);
+  ExtractBits(scratch, scratch, JSVAL_TAG_SHIFT, 64 - JSVAL_TAG_SHIFT);
+  return scratch;
+}
+
+Register MacroAssemblerRiscv64Compat::extractTag(const BaseIndex& address,
+                                                 Register scratch) {
+  computeScaledAddress(address, scratch);
+  return extractTag(Address(scratch, address.offset), scratch);
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/LoongArch interface.
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+// Store a boxed Value to a base+scaled-index address.
+void MacroAssemblerRiscv64Compat::storeValue(ValueOperand val,
+                                             const BaseIndex& dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  computeScaledAddress(dest, ScratchRegister);
+  storeValue(val, Address(ScratchRegister, dest.offset));
+}
+
+// Box |reg| with |type| and store to a base+scaled-index address. Offsets
+// that do not fit in a 12-bit immediate are folded into the base first.
+void MacroAssemblerRiscv64Compat::storeValue(JSValueType type, Register reg,
+                                             BaseIndex dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+
+  computeScaledAddress(dest, ScratchRegister);
+
+  int32_t offset = dest.offset;
+  if (!is_int12(offset)) {
+    UseScratchRegisterScope temps(this);
+    Register SecondScratchReg = temps.Acquire();
+    ma_li(SecondScratchReg, Imm32(offset));
+    add(ScratchRegister, ScratchRegister, SecondScratchReg);
+    offset = 0;
+  }
+
+  storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+// A boxed Value is a single 64-bit word; store it directly.
+void MacroAssemblerRiscv64Compat::storeValue(ValueOperand val,
+                                             const Address& dest) {
+  storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+// Box |reg| with |type| and store. For int32/boolean the payload and the
+// tag word can be written as two independent 32-bit stores (payload in the
+// low word, the tag's upper half in the high word); other types build the
+// full boxed word in a scratch register.
+void MacroAssemblerRiscv64Compat::storeValue(JSValueType type, Register reg,
+                                             Address dest) {
+  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+    store32(reg, dest);
+    JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+    store32(((Imm64(tag)).secondHalf()), Address(dest.base, dest.offset + 4));
+  } else {
+    ScratchRegisterScope SecondScratchReg(asMasm());
+    MOZ_ASSERT(dest.base != SecondScratchReg);
+    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+    slli(SecondScratchReg, SecondScratchReg, JSVAL_TAG_SHIFT);
+    InsertBits(SecondScratchReg, reg, 0, JSVAL_TAG_SHIFT);
+    storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+  }
+}
+
+// Store a constant Value. GC things use a patchable move so the GC can
+// relocate the pointer embedded in the code.
+void MacroAssemblerRiscv64Compat::storeValue(const Value& val, Address dest) {
+  UseScratchRegisterScope temps(this);
+  Register SecondScratchReg = temps.Acquire();
+  if (val.isGCThing()) {
+    writeDataRelocation(val);
+    movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
+  } else {
+    ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
+  }
+  storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+// Store a constant Value to a base+scaled-index address; large offsets are
+// folded into the base as in the BaseIndex overload above.
+void MacroAssemblerRiscv64Compat::storeValue(const Value& val, BaseIndex dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Register SecondScratchReg = temps.Acquire();
+  computeScaledAddress(dest, ScratchRegister);
+
+  int32_t offset = dest.offset;
+  if (!is_int12(offset)) {
+    ma_li(SecondScratchReg, Imm32(offset));
+    add(ScratchRegister, ScratchRegister, SecondScratchReg);
+    offset = 0;
+  }
+  storeValue(val, Address(ScratchRegister, offset));
+}
+
+// Load a boxed Value from a base+scaled-index address.
+void MacroAssemblerRiscv64Compat::loadValue(const BaseIndex& addr,
+                                            ValueOperand val) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  computeScaledAddress(addr, ScratchRegister);
+  loadValue(Address(ScratchRegister, addr.offset), val);
+}
+
+// A boxed Value is a single 64-bit word; load it directly.
+void MacroAssemblerRiscv64Compat::loadValue(Address src, ValueOperand val) {
+  loadPtr(Address(src.base, src.offset), val.valueReg());
+}
+
+// Combine |payload| with the tag for |type| into a boxed Value in |dest|.
+// For int32/boolean the bits between the 32-bit payload and the tag are
+// cleared so the boxed word is canonical.
+void MacroAssemblerRiscv64Compat::tagValue(JSValueType type, Register payload,
+                                           ValueOperand dest) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  MOZ_ASSERT(dest.valueReg() != ScratchRegister);
+  JitSpew(JitSpew_Codegen, "[ tagValue");
+  if (payload != dest.valueReg()) {
+    mv(dest.valueReg(), payload);
+  }
+  ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+  InsertBits(dest.valueReg(), ScratchRegister, JSVAL_TAG_SHIFT,
+             64 - JSVAL_TAG_SHIFT);
+  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+    InsertBits(dest.valueReg(), zero, 32, JSVAL_TAG_SHIFT - 32);
+  }
+  JitSpew(JitSpew_Codegen, "]");
+}
+
+// Push a boxed Value onto the stack.
+void MacroAssemblerRiscv64Compat::pushValue(ValueOperand val) {
+  // Allocate stack slots for Value. One for each.
+  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+  // Store Value
+  storeValue(val, Address(StackPointer, 0));
+}
+
+// Push a boxed Value read from memory.
+void MacroAssemblerRiscv64Compat::pushValue(const Address& addr) {
+  // Load the value before adjusting the stack: addr.base may itself be the
+  // stack pointer, so the offset is only valid pre-adjustment.
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  loadPtr(Address(addr.base, addr.offset), ScratchRegister);
+  ma_sub64(StackPointer, StackPointer, Imm32(sizeof(Value)));
+  storePtr(ScratchRegister, Address(StackPointer, 0));
+}
+
+// Pop a boxed Value from the stack.
+void MacroAssemblerRiscv64Compat::popValue(ValueOperand val) {
+  ld(val.valueReg(), StackPointer, 0);
+  ma_add64(StackPointer, StackPointer, Imm32(sizeof(Value)));
+}
+
+// Emit a breakpoint/trap instruction with the given code.
+void MacroAssemblerRiscv64Compat::breakpoint(uint32_t value) { break_(value); }
+
+// Coerce |source| to a double in |dest|: doubles are unboxed directly,
+// int32s are converted, and any other type jumps to |failure|.
+void MacroAssemblerRiscv64Compat::ensureDouble(const ValueOperand& source,
+                                               FloatRegister dest,
+                                               Label* failure) {
+  Label isDouble, done;
+  {
+    // The tag scope must close before the scratch register is acquired
+    // below, since both may use the same scratch pool.
+    ScratchTagScope tag(asMasm(), source);
+    splitTagForTest(source, tag);
+    asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+    asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+  }
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  unboxInt32(source, ScratchRegister);
+  convertInt32ToDouble(ScratchRegister, dest);
+  jump(&done);
+
+  bind(&isDouble);
+  unboxDouble(source, dest);
+
+  bind(&done);
+}
+
+// Exception-handling tail: calls the C++ HandleException, then dispatches
+// on the ResumeFromException kind it fills in, restoring FP/SP and jumping
+// to the appropriate resume point for each kind.
+void MacroAssemblerRiscv64Compat::handleFailureWithHandlerTail(
+    Label* profilerExitTail, Label* bailoutTail) {
+  // Reserve space for exception information.
+  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
+             ~(ABIStackAlignment - 1);
+  asMasm().subPtr(Imm32(size), StackPointer);
+  mv(a0, StackPointer);  // Use a0 since it is a first function argument
+
+  // Call the handler.
+  using Fn = void (*)(ResumeFromException* rfe);
+  asMasm().setupUnalignedABICall(a1);
+  asMasm().passABIArg(a0);
+  asMasm().callWithABI<Fn, HandleException>(
+      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+  Label entryFrame;
+  Label catch_;
+  Label finally;
+  Label returnBaseline;
+  Label returnIon;
+  Label bailout;
+  Label wasm;
+  Label wasmCatch;
+
+  // Already clobbered a0, so use it...
+  load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
+  asMasm().branch32(Assembler::Equal, a0,
+                    Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
+                    &catch_);
+  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
+                    &finally);
+  asMasm().branch32(Assembler::Equal, a0,
+                    Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+                    &returnBaseline);
+  asMasm().branch32(Assembler::Equal, a0,
+                    Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
+                    &bailout);
+  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
+                    &wasm);
+  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
+                    &wasmCatch);
+
+  breakpoint();  // Invalid kind.
+
+  // No exception handler. Load the error value, restore state and return from
+  // the entry frame.
+  bind(&entryFrame);
+  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+          FramePointer);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+          StackPointer);
+
+  // We're going to be returning by the ion calling convention
+  ma_pop(ra);
+  jump(ra);
+  nop();
+
+  // If we found a catch handler, this must be a baseline frame. Restore
+  // state and jump to the catch block.
+  bind(&catch_);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+          FramePointer);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+          StackPointer);
+  jump(a0);
+
+  // If we found a finally block, this must be a baseline frame. Push two
+  // values expected by the finally block: the exception and BooleanValue(true).
+  bind(&finally);
+  ValueOperand exception = ValueOperand(a1);
+  loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
+
+  loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
+  loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
+          FramePointer);
+  loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
+
+  pushValue(exception);
+  pushValue(BooleanValue(true));
+  jump(a0);
+
+  // Return BaselineFrame->returnValue() to the caller.
+  // Used in debug mode and for GeneratorReturn.
+  Label profilingInstrumentation;
+  bind(&returnBaseline);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+          FramePointer);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+          StackPointer);
+  loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+            JSReturnOperand);
+  jump(&profilingInstrumentation);
+
+  // Return the given value to the caller.
+  bind(&returnIon);
+  loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
+            JSReturnOperand);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+          FramePointer);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+          StackPointer);
+
+  // If profiling is enabled, then update the lastProfilingFrame to refer to
+  // caller frame before returning. This code is shared by ForcedReturnIon
+  // and ForcedReturnBaseline.
+  bind(&profilingInstrumentation);
+  {
+    Label skipProfilingInstrumentation;
+    // Test if profiler enabled.
+    AbsoluteAddress addressOfEnabled(
+        asMasm().runtime()->geckoProfiler().addressOfEnabled());
+    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+                      &skipProfilingInstrumentation);
+    jump(profilerExitTail);
+    bind(&skipProfilingInstrumentation);
+  }
+
+  mv(StackPointer, FramePointer);
+  pop(FramePointer);
+  ret();
+
+  // If we are bailing out to baseline to handle an exception, jump to
+  // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+  bind(&bailout);
+  loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+          StackPointer);
+  ma_li(ReturnReg, Imm32(1));
+  jump(bailoutTail);
+
+  // If we are throwing and the innermost frame was a wasm frame, reset SP and
+  // FP; SP is pointing to the unwound return address to the wasm entry, so
+  // we can just ret().
+  bind(&wasm);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+          FramePointer);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+          StackPointer);
+  ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
+  ret();
+
+  // Found a wasm catch handler, restore state and jump to it.
+  bind(&wasmCatch);
+  loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+          FramePointer);
+  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+          StackPointer);
+  jump(a1);
+}
+
+// Emit a jump whose site can later be toggled; returns its code offset.
+CodeOffset MacroAssemblerRiscv64Compat::toggledJump(Label* label) {
+  CodeOffset ret(nextOffset().getOffset());
+  BranchShort(label);
+  return ret;
+}
+
+// Emit a call to |target| that can be toggled on/off by patching the final
+// instruction between jalr and nop. The patchable load of the target
+// address is always emitted so the site has a fixed size
+// (ToggledCallSize), asserted below.
+CodeOffset MacroAssemblerRiscv64Compat::toggledCall(JitCode* target,
+                                                    bool enabled) {
+  DEBUG_PRINTF("\ttoggledCall\n");
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+  BufferOffset bo = nextOffset();
+  CodeOffset offset(bo.getOffset());
+  addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+  ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+  if (enabled) {
+    jalr(ScratchRegister);
+  } else {
+    nop();
+  }
+  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
+                            ToggledCallSize(nullptr));
+  return offset;
+}
+
+// Shrink the stack by |imm32| bytes; no-op for zero.
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+  if (imm32.value) {
+    asMasm().subPtr(imm32, StackPointer);
+  }
+}
+
+// Clamp a double to [0, 255] for Uint8Clamped array stores: NaN maps to 0,
+// otherwise round to the nearest int and clamp.
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+  JitSpew(JitSpew_Codegen, "[ clampDoubleToUint8");
+  Label nan, done;
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  // feq.d yields 0 iff input is NaN (NaN != NaN).
+  feq_d(scratch, input, input);
+  beqz(scratch, &nan);
+  // NOTE(review): addi with rd == x0 is an architectural no-op; presumably a
+  // deliberate marker/hint instruction — confirm it is intentional.
+  addi(zero, scratch, 0x11);
+  Round_w_d(output, input);
+  clampIntToUint8(output);
+  ma_branch(&done);
+  // Input is nan
+  bind(&nan);
+  mv(output, zero_reg);
+  bind(&done);
+  JitSpew(JitSpew_Codegen, "]");
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+// riscv64 converts uint64 -> double without a temp register.
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+// Call to a label; returns the offset just after the call.
+CodeOffset MacroAssembler::call(Label* label) {
+  BranchAndLink(label);
+  return CodeOffset(currentOffset());
+}
+
+// Indirect call through a register.
+CodeOffset MacroAssembler::call(Register reg) {
+  jalr(reg, 0);
+  return CodeOffset(currentOffset());
+}
+
+// Call a wasm symbolic address; CallReg is excluded from the scratch pool
+// so movePtr cannot clobber it while materializing the target.
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
+  UseScratchRegisterScope temps(this);
+  temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+  movePtr(target, CallReg);
+  return call(CallReg);
+}
+
+// Emit a 5-slot far-jump site: auipc/lw/add/jr plus a 32-bit displacement
+// word (initialized to 0xffffffff) that patchFarJump() fills in later.
+CodeOffset MacroAssembler::farJumpWithPatch() {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Register scratch2 = temps.Acquire();
+  // Allocate space which will be patched by patchFarJump().
+  CodeOffset farJump(nextInstrOffset(5).getOffset());
+  auipc(scratch, 0);
+  lw(scratch2, scratch, 4 * sizeof(Instr));
+  add(scratch, scratch, scratch2);
+  jr(scratch, 0);
+  spew(".space 32bit initValue 0xffff ffff");
+  emit(UINT32_MAX);
+  return farJump;
+}
+
+// Emit a patchable address load (patched later with a nearby code address).
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+  return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+// Emit 7 nops that can later be patched into a full materialize-address +
+// jalr call sequence of the same size.
+CodeOffset MacroAssembler::nopPatchableToCall() {
+  BlockTrampolinePoolScope block_trampoline_pool(this, 7);
+  // riscv64
+  nop();  // lui(rd, (int32_t)high_20);
+  nop();  // addi(rd, rd, low_12); // 31 bits in rd.
+  nop();  // slli(rd, rd, 11); // Space for next 11 bits
+  nop();  // ori(rd, rd, b11); // 11 bits are put in. 42 bits in rd
+  nop();  // slli(rd, rd, 6); // Space for next 6 bits
+  nop();  // ori(rd, rd, a6); // 6 bits are put in. 48 bits in rd
+  nop();  // jalr
+  return CodeOffset(currentOffset());
+}
+
+// Emit the instruction wasm uses to trap.
+CodeOffset MacroAssembler::wasmTrapInstruction() {
+  CodeOffset offset(currentOffset());
+  BlockTrampolinePoolScope block_trampoline_pool(this, 2);
+  break_(kWasmTrapCode);  // TODO: teq(zero, zero, WASM_TRAP)
+  return offset;
+}
+
+// Bytes consumed on the stack by PushRegsInMask for |set|.
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+  return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
+// Branch to |label| if |value| is (cond==Equal) / is not (cond==NotEqual) a
+// GC cell in the nursery. Non-GC values short-circuit; otherwise the chunk's
+// store-buffer pointer (non-null only for nursery chunks) is tested.
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+                                                  const T& value, Register temp,
+                                                  Label* label) {
+  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+  Label done;
+  branchTestGCThing(Assembler::NotEqual, value,
+                    cond == Assembler::Equal ? &done : label);
+
+  // temp may be InvalidReg, use scratch2 instead.
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+
+  getGCThingValueChunk(value, scratch2);
+  loadPtr(Address(scratch2, gc::ChunkStoreBufferOffset), scratch2);
+  branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);
+
+  bind(&done);
+}
+
+// Store an unboxed |value| of MIR type |valueType| as a boxed Value at
+// |dest|, choosing the constant / register / double boxing path.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+                                       MIRType valueType, const T& dest) {
+  MOZ_ASSERT(valueType < MIRType::Value);
+
+  if (valueType == MIRType::Double) {
+    boxDouble(value.reg().typedReg().fpu(), dest);
+    return;
+  }
+
+  if (value.constant()) {
+    storeValue(value.value(), dest);
+  } else {
+    storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+               dest);
+  }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+                                                MIRType valueType,
+                                                const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+    const ConstantOrRegister& value, MIRType valueType,
+    const BaseObjectElementIndex& dest);
+
+// ===============================================================
+// Jit Frames.
+
+// Push the address of the next instruction as a fake return address and
+// return its offset (resolved via a CodeLabel bound right after the push).
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+  CodeLabel cl;
+
+  ma_li(scratch, &cl);
+  Push(scratch);
+  bind(&cl);
+  uint32_t retAddr = currentOffset();
+
+  addCodeLabel(cl);
+  return retAddr;
+}
+
+//===============================
+// AtomicOp
+
+// Emit an atomic exchange on an 8-, 16-, or 32-bit memory cell, leaving the
+// previous value in |output|.
+//
+// 32-bit accesses use a plain LR.W/SC.W retry loop. RISC-V has no sub-word
+// LR/SC, so 8/16-bit accesses operate on the aligned containing word: the
+// target lanes are replaced via shift/mask (using valueTemp, offsetTemp and
+// maskTemp), the word is stored back, and the previous lane value is shifted
+// down and sign- or zero-extended into |output| per the Scalar type.
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+                           const wasm::MemoryAccessDesc* access,
+                           Scalar::Type type, const Synchronization& sync,
+                           const T& mem, Register value, Register valueTemp,
+                           Register offsetTemp, Register maskTemp,
+                           Register output) {
+  ScratchRegisterScope scratch(masm);
+  UseScratchRegisterScope temps(&masm);
+  Register scratch2 = temps.Acquire();
+  bool signExtend = Scalar::isSignedIntType(type);
+  unsigned nbytes = Scalar::byteSize(type);
+
+  switch (nbytes) {
+    case 1:
+    case 2:
+      break;
+    case 4:
+      // Word-sized exchanges need no lane temporaries.
+      MOZ_ASSERT(valueTemp == InvalidReg);
+      MOZ_ASSERT(offsetTemp == InvalidReg);
+      MOZ_ASSERT(maskTemp == InvalidReg);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  Label again;
+
+  masm.computeEffectiveAddress(mem, scratch);
+
+  if (nbytes == 4) {
+    masm.memoryBarrierBefore(sync);
+    masm.bind(&again);
+
+    if (access) {
+      masm.append(*access, masm.size());
+    }
+
+    // LR/SC loop: load-reserve the old value, store-conditional the new one,
+    // retry until the reservation holds.
+    masm.lr_w(true, true, output, scratch);
+    masm.or_(scratch2, value, zero);
+    masm.sc_w(true, true, scratch2, scratch, scratch2);
+    masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+              ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+
+    return;
+  }
+
+  // Sub-word path: align |scratch| down to the containing word; offsetTemp
+  // becomes the lane's bit offset, maskTemp the inverted lane mask.
+  masm.andi(offsetTemp, scratch, 3);
+  masm.subPtr(offsetTemp, scratch);
+  masm.slliw(offsetTemp, offsetTemp, 3);
+  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+  masm.sllw(maskTemp, maskTemp, offsetTemp);
+  masm.nor(maskTemp, zero, maskTemp);
+  switch (nbytes) {
+    case 1:
+      masm.andi(valueTemp, value, 0xff);
+      break;
+    case 2:
+      masm.ma_and(valueTemp, value, Imm32(0xffff));
+      break;
+  }
+  masm.sllw(valueTemp, valueTemp, offsetTemp);
+
+  masm.memoryBarrierBefore(sync);
+
+  masm.bind(&again);
+
+  if (access) {
+    masm.append(*access, masm.size());
+  }
+
+  // Splice the new lane into the loaded word and store it back.
+  masm.lr_w(true, true, output, scratch);
+  masm.and_(scratch2, output, maskTemp);
+  masm.or_(scratch2, scratch2, valueTemp);
+
+  masm.sc_w(true, true, scratch2, scratch, scratch2);
+
+  masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+            ShortJump);
+
+  // Move the previous lane value down to bit 0 and extend it. The extension
+  // must be applied to |output|: after the shift its upper bits still hold
+  // the neighbouring lanes of the loaded word. (The previous code masked
+  // |valueTemp| from |value| here, which left |output| unnormalized in the
+  // zero-extend case.)
+  masm.srlw(output, output, offsetTemp);
+
+  switch (nbytes) {
+    case 1:
+      if (signExtend) {
+        masm.slliw(output, output, 32 - 8);
+        masm.sraiw(output, output, 32 - 8);
+      } else {
+        masm.andi(output, output, 0xff);
+      }
+      break;
+    case 2:
+      if (signExtend) {
+        masm.slliw(output, output, 32 - 16);
+        masm.sraiw(output, output, 32 - 16);
+      } else {
+        masm.ma_and(output, output, Imm32(0xffff));
+      }
+      break;
+  }
+
+  masm.memoryBarrierAfter(sync);
+}
+
+// Emit a 64-bit atomic exchange via an LR.D/SC.D retry loop; the previous
+// value is left in |output|.
+// NOTE(review): this uses the file-scope ScratchRegister directly (not the
+// UseScratchRegisterScope pool) — verify it cannot alias SecondScratchReg.
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+                             const wasm::MemoryAccessDesc* access,
+                             const Synchronization& sync, const T& mem,
+                             Register64 value, Register64 output) {
+  MOZ_ASSERT(value != output);
+  UseScratchRegisterScope temps(&masm);
+  Register SecondScratchReg = temps.Acquire();
+  masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+  Label tryAgain;
+
+  masm.memoryBarrierBefore(sync);
+
+  masm.bind(&tryAgain);
+
+  if (access) {
+    masm.append(*access, masm.size());
+  }
+
+  // sc_d writes its success flag into ScratchRegister, so the value to store
+  // must be copied each iteration.
+  masm.lr_d(true, true, output.reg, SecondScratchReg);
+  masm.movePtr(value.reg, ScratchRegister);
+  masm.sc_d(true, true, ScratchRegister, SecondScratchReg, ScratchRegister);
+  masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::NonZero,
+            ShortJump);
+
+  masm.memoryBarrierAfter(sync);
+}
+
+// Emit a 64-bit atomic read-modify-write (add/sub/and/or/xor) via an
+// LR.D/SC.D retry loop. The previous value is left in |output|; |temp|
+// holds the updated value and the SC success flag.
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+                            const wasm::MemoryAccessDesc* access,
+                            const Synchronization& sync, AtomicOp op,
+                            Register64 value, const T& mem, Register64 temp,
+                            Register64 output) {
+  MOZ_ASSERT(value != output);
+  MOZ_ASSERT(value != temp);
+  UseScratchRegisterScope temps(&masm);
+  Register SecondScratchReg = temps.Acquire();
+  masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+  Label tryAgain;
+
+  masm.memoryBarrierBefore(sync);
+
+  masm.bind(&tryAgain);
+  if (access) {
+    masm.append(*access, masm.size());
+  }
+
+  masm.lr_d(true, true, output.reg, SecondScratchReg);
+
+  switch (op) {
+    case AtomicFetchAddOp:
+      masm.add(temp.reg, output.reg, value.reg);
+      break;
+    case AtomicFetchSubOp:
+      masm.sub(temp.reg, output.reg, value.reg);
+      break;
+    case AtomicFetchAndOp:
+      masm.and_(temp.reg, output.reg, value.reg);
+      break;
+    case AtomicFetchOrOp:
+      masm.or_(temp.reg, output.reg, value.reg);
+      break;
+    case AtomicFetchXorOp:
+      masm.xor_(temp.reg, output.reg, value.reg);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  masm.sc_d(true, true, temp.reg, SecondScratchReg, temp.reg);
+  masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::NonZero, ShortJump);
+
+  masm.memoryBarrierAfter(sync);
+}
+
+// AtomicEffectOp: atomic read-modify-write (add/sub/and/or/xor) on an 8-,
+// 16- or 32-bit cell, discarding the previous value. 32-bit cells use a
+// direct LR.W/SC.W retry loop; 1- and 2-byte cells are emulated with a
+// 32-bit LR/SC on the aligned containing word, using valueTemp/offsetTemp/
+// maskTemp to update only the affected bytes.
+template <typename T>
+static void AtomicEffectOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ ScratchRegisterScope scratch(masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ unsigned nbytes = Scalar::byteSize(type);
+
+ // The subword temps are only meaningful for 1- and 2-byte accesses.
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ // Record the trap site at the load for wasm fault attribution.
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_w(true, true, scratch2, scratch);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(scratch2, scratch2, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(scratch2, scratch2, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(scratch2, scratch2, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(scratch2, scratch2, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(scratch2, scratch2, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // SC writes non-zero on failure; retry until it succeeds.
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ // Subword path: align the address down to the containing word and compute
+ // the cell's bit offset (byte offset * 8) and an inverted field mask
+ // (nor with zero == bitwise not).
+ masm.andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.slliw(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.sllw(maskTemp, maskTemp, offsetTemp);
+ masm.nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ // Load the whole word, extract the field, apply the operation.
+ masm.lr_w(true, true, scratch2, scratch);
+ masm.srlw(valueTemp, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(valueTemp, valueTemp, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // Truncate the result to the field width. Note 0xffff does not fit in
+ // ANDI's signed 12-bit immediate, hence ma_and for the 2-byte case.
+ switch (nbytes) {
+ case 1:
+ masm.andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.ma_and(valueTemp, valueTemp, Imm32(0xffff));
+ break;
+ }
+
+ masm.sllw(valueTemp, valueTemp, offsetTemp);
+
+ // Merge the updated field back into the untouched surrounding bytes.
+ masm.and_(scratch2, scratch2, maskTemp);
+ masm.or_(scratch2, scratch2, valueTemp);
+
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+// AtomicFetchOp: atomic read-modify-write (add/sub/and/or/xor) on an 8-,
+// 16- or 32-bit cell; the previous value is left in `output`.
+// 32-bit accesses use a direct LR.W/SC.W retry loop. Narrower accesses
+// operate on the aligned 32-bit word containing the cell: offsetTemp holds
+// the cell's bit offset, maskTemp the inverted field mask, and valueTemp the
+// updated field; surrounding bytes are preserved via mask/merge.
+// `access` (may be null) marks the faultable load for wasm trap metadata.
+template <typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ ScratchRegisterScope scratch(masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ // The subword temps are only meaningful for 1- and 2-byte accesses.
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ // Old value lands directly in `output`; the updated value goes through
+ // scratch2 so a failed SC retry re-reads `output` fresh.
+ masm.lr_w(true, true, output, scratch);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(scratch2, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(scratch2, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(scratch2, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(scratch2, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(scratch2, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // SC writes non-zero on failure; retry until it succeeds.
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ // Subword path: align the address and precompute bit offset and inverted
+ // field mask (nor with zero == bitwise not).
+ masm.andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.slliw(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.sllw(maskTemp, maskTemp, offsetTemp);
+ masm.nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ // Load the containing word and extract the old field into `output`.
+ masm.lr_w(true, true, scratch2, scratch);
+ masm.srlw(output, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.addw(valueTemp, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.subw(valueTemp, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.and_(valueTemp, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.or_(valueTemp, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.xor_(valueTemp, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ // 0xffff does not fit in ANDI's signed 12-bit immediate; use ma_and
+ // (keeps this in sync with AtomicEffectOp's subword path).
+ masm.ma_and(valueTemp, valueTemp, Imm32(0xffff));
+ break;
+ }
+
+ masm.sllw(valueTemp, valueTemp, offsetTemp);
+
+ // Merge the updated field back into the untouched surrounding bytes.
+ masm.and_(scratch2, scratch2, maskTemp);
+ masm.or_(scratch2, scratch2, valueTemp);
+
+ masm.sc_w(true, true, scratch2, scratch, scratch2);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::NonZero,
+ ShortJump);
+
+ // Normalize the extracted old value to the element type (sign- or
+ // zero-extend from 8/16 bits).
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.slliw(output, output, 32 - 8);
+ masm.sraiw(output, output, 32 - 8);
+ } else {
+ masm.andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.slliw(output, output, 32 - 16);
+ masm.sraiw(output, output, 32 - 16);
+ } else {
+ // As above, 0xffff exceeds ANDI's immediate range.
+ masm.ma_and(output, output, Imm32(0xffff));
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+// Shared helper for the JS compareExchange entry points. A Uint32 result may
+// not fit in an int32 jsval, so it is produced in `temp` and converted to a
+// double in the FP output register; all other element types go to a GPR.
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output.gpr());
+ }
+}
+
+// Same Uint32-to-double convention as CompareExchangeJS, for atomicExchange.
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+// Same Uint32-to-double convention as CompareExchangeJS, for atomicFetchOp.
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+// The MacroAssembler entry points below bind the shared static helpers above
+// to Address/BaseIndex memory operands. Passing a null MemoryAccessDesc
+// means "not a wasm heap access": no trap metadata is recorded.
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+// Branch if `ptr` points into a nursery chunk: mask the pointer down to its
+// chunk base, then test whether the chunk's store-buffer slot is non-null
+// (only nursery chunks have one).
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != ScratchRegister); // Both may be used internally.
+ MOZ_ASSERT(temp != ScratchRegister);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ma_and(temp, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ // Inverted condition: "in nursery" == store buffer pointer != null.
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ zero, label);
+}
+// Compare a boxed Value against a constant Value by materializing the
+// constant's raw bits in a scratch register and comparing the full 64 bits.
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, ValueOperand(scratch));
+ ma_b(lhs.valueReg(), scratch, label, cond);
+}
+// Branch if the Value (in memory / in a register) is a cell in the nursery;
+// both overloads defer to the shared implementation.
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+// Indirect call through a memory slot: load the target into CallReg and call
+// it. CallReg is excluded from scratch allocation so loadPtr cannot pick it.
+void MacroAssembler::call(const Address& addr) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ loadPtr(addr, CallReg);
+ call(CallReg);
+}
+// Call an absolute address, recording a pending jump so the hard-coded
+// target can be relocated when the code is finalized.
+void MacroAssembler::call(ImmPtr target) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, target, RelocationKind::HARDCODED);
+ ma_call(target);
+}
+// Call an absolute code address held in an ImmWord; delegates to the ImmPtr
+// overload. reinterpret_cast makes the integer-to-pointer conversion
+// explicit (preferred over a C-style cast).
+void MacroAssembler::call(ImmWord target) {
+ call(ImmPtr(reinterpret_cast<void*>(target.value)));
+}
+
+// Call into a JitCode blob. Emits a patchable load of the code address
+// (ma_liPatchable) followed by an indirect call, with the trampoline pool
+// blocked so the sequence stays contiguous and patchable. The pending jump
+// lets the JITCODE relocation update the embedded address.
+void MacroAssembler::call(JitCode* c) {
+ DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ callJitNoProfiler(scratch);
+ DEBUG_PRINTF("]\n");
+}
+
+// Set up the stack for a native ABI call: reserve space for outgoing stack
+// arguments plus a saved $ra, align to ABIStackAlignment, spill $ra, and run
+// the move resolver to place the arguments. `*stackAdjust` receives the
+// total adjustment so callWithABIPost can undo it.
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ // Reserve place for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ // Account for the frame already pushed (and the wasm::Frame when called
+ // from wasm) so the final SP is ABI-aligned.
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Save $ra because call is going to clobber it. Restore it in
+ // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+ // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(asMasm());
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+// Tear down the frame built by callWithABIPre: restore $ra, then either
+// restore the pre-call SP (unaligned calls) or free the reserved stack.
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+// ABI call to a callee held in a register. The callee is moved to CallReg
+// first because `fun` may be one of the IntArg registers that the argument
+// moves below would clobber; CallReg is excluded from scratch allocation.
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+ // Load the callee in scratch2, no instruction between the movePtr and
+ // call should clobber it. Note that we can't use fun because it may be
+ // one of the IntArg registers clobbered before the call.
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ movePtr(fun, CallReg);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(CallReg);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ABI call to a callee loaded from memory; same CallReg discipline as above.
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ // Load the callee in scratch2, as above.
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+ loadPtr(fun, CallReg);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(CallReg);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ceil(double) -> int32, bailing to `fail` on inputs whose ceil cannot be
+// represented as an int32. Inputs in (-1, 0] would produce -0 (or 0), which
+// is only acceptable for exactly +0; any other such input (including -0.0,
+// detected via the raw high bits) bails.
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ ScratchDoubleScope fscratch(*this);
+ Label performCeil, done;
+ // If x < -1 or x > 0 then perform ceil.
+ loadConstantDouble(0, fscratch);
+ branchDouble(Assembler::DoubleGreaterThan, src, fscratch, &performCeil);
+ loadConstantDouble(-1.0, fscratch);
+ branchDouble(Assembler::DoubleLessThanOrEqual, src, fscratch, &performCeil);
+
+ Register scratch = temps.Acquire();
+ // If binary value is not zero, the input was not 0, so we bail.
+ // (High 32 bits of +0.0 are zero; -0.0 and any x in (-1, 0) are not.)
+ {
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ bind(&performCeil);
+ // Ceil_w_d reports success in `scratch`; anything other than 1 bails.
+ Ceil_w_d(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ bind(&done);
+}
+
+// ceil(float32) -> int32; same structure as ceilDoubleToInt32, with the
+// zero check performed on the raw 32-bit pattern via fmv.x.w.
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ UseScratchRegisterScope temps(this);
+ ScratchDoubleScope fscratch(*this);
+ Label performCeil, done;
+ // If x < -1 or x > 0 then perform ceil.
+ loadConstantFloat32(0, fscratch);
+ branchFloat(Assembler::DoubleGreaterThan, src, fscratch, &performCeil);
+ loadConstantFloat32(-1.0, fscratch);
+ branchFloat(Assembler::DoubleLessThanOrEqual, src, fscratch, &performCeil);
+
+ Register scratch = temps.Acquire();
+ // If binary value is not zero, the input was not 0, so we bail.
+ {
+ fmv_x_w(scratch, src);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+ bind(&performCeil);
+ Ceil_w_s(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ bind(&done);
+}
+// Forward a human-readable annotation to the assembler (debug/disasm only).
+void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
+// CompareExchange64: 64-bit compare-and-swap via LR.D/SC.D. The old memory
+// value is always left in `output`; the store only happens when it equals
+// `expect`.
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
+ MOZ_ASSERT(expect != output && replace != output);
+ ScratchRegisterScope scratch(masm);
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+ Label exit;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ // Record the trap site at the load for wasm fault attribution.
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.lr_d(true, true, output.reg, scratch);
+
+ // NOTE(review): a failed comparison branches directly to `exit`, past
+ // memoryBarrierAfter below — confirm this is the intended fence behavior.
+ masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
+ masm.movePtr(replace.reg, scratch2);
+ // SC writes non-zero to scratch2 on failure; retry until it succeeds.
+ masm.sc_d(true, true, scratch2, scratch, scratch2);
+ masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::NonZero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&exit);
+}
+
+// Non-wasm (null MemoryAccessDesc) entry points for 64-bit and JS-typed
+// compare-exchange; they bind the shared static helpers to Address/BaseIndex
+// operands.
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+// Int/float conversions: RV64's fcvt instructions convert 64-bit (signed or
+// unsigned) integers directly, so each of these is a single instruction.
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ fcvt_d_l(dest, src.scratchReg());
+}
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ fcvt_s_l(dest, src.scratchReg());
+}
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ fcvt_d_l(dest, src);
+}
+// `tmp` is unused here: fcvt.d.lu handles unsigned sources natively.
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register tmp) {
+ fcvt_d_lu(dest, src.scratchReg());
+}
+// `tmp` is unused here: fcvt.s.lu handles unsigned sources natively.
+void MacroAssembler::convertUInt64ToFloat32(Register64 src, FloatRegister dest,
+ Register tmp) {
+ fcvt_s_lu(dest, src.scratchReg());
+}
+// output = magnitude of lhs with the sign bit of rhs (fsgnj.d).
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ fsgnj_d(output, lhs, rhs);
+}
+// Wasm fake exit frames need no extra work on this platform; defer to the
+// generic implementation.
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ enterFakeExitFrame(cxreg, scratch, type);
+}
+// Compute both quotient and remainder of srcDest / rhs. The remainder is
+// computed first because the quotient overwrites srcDest, which is also an
+// operand of the remainder.
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet&) {
+ if (isUnsigned) {
+ ma_modu32(remOutput, srcDest, rhs);
+ ma_divu32(srcDest, srcDest, rhs);
+ } else {
+ ma_mod32(remOutput, srcDest, rhs);
+ ma_div32(srcDest, srcDest, rhs);
+ }
+}
+// The LiveRegisterSet is unused: hardware divide needs no spilled helpers.
+void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ quotient32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ remainder32(rhs, srcDest, isUnsigned);
+}
+
+// floor(double) -> int32, bailing on failure. Floor_w_d reports success in
+// `scratch` (anything other than 1 bails); afterwards a -0.0 input (raw bit
+// pattern 0x8000000000000000) also bails since -0 is not an int32.
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Floor_w_d(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ fmv_x_d(scratch, src);
+ ma_branch(fail, Equal, scratch, Operand(0x8000000000000000));
+ JitSpew(JitSpew_Codegen, "]");
+}
+// floor(float32) -> int32; same structure, with the -0.0f check on the
+// 32-bit pattern 0x80000000.
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Floor_w_s(dest, src, scratch);
+ ma_b(scratch, Imm32(1), fail, NotEqual);
+ fmv_x_w(scratch, src);
+ ma_branch(fail, Equal, scratch, Operand(int32_t(0x80000000)));
+ JitSpew(JitSpew_Codegen, "]");
+}
+// Nothing to flush: instruction-cache maintenance is handled elsewhere.
+void MacroAssembler::flush() {}
+// Load the store-buffer pointer of the chunk containing `ptr` (mask down to
+// the chunk base, then read ChunkStoreBufferOffset).
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+// Box a TypedOrValueRegister into a ValueOperand. Already-boxed sources are
+// forwarded; typed GPR sources are boxed by type; float32 is first widened
+// to double because jsvals only box doubles.
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
+ return;
+ }
+
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+// Register-to-register Value move; no-op when source and destination match.
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movePtr(src.valueReg(), dest.valueReg());
+}
+
+// Materialize a constant Value. GC things need a data relocation entry and a
+// patchable move so the GC can update the embedded pointer.
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ if (!src.isGCThing()) {
+ ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
+ return;
+ }
+
+ writeDataRelocation(src);
+ movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
+}
+// nearbyInt rounding is not implemented for riscv64 (callers must check
+// support before lowering to these).
+void MacroAssembler::nearbyIntDouble(RoundingMode, FloatRegister,
+ FloatRegister) {
+ MOZ_CRASH("not supported on this platform");
+}
+void MacroAssembler::nearbyIntFloat32(RoundingMode, FloatRegister,
+ FloatRegister) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+// Out-of-line check after a failed float32 -> int32 wasm truncation: NaN
+// traps with InvalidConversionToInteger; values merely out of range trap
+// with IntegerOverflow; anything actually in range branches back to
+// `rejoin`. Range bounds are chosen to be exactly representable in float32.
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF32(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const float two_31 = -float(INT32_MIN);
+ ScratchFloat32Scope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ // Unsigned: valid range is (-1, 2^32).
+ loadConstantFloat32(two_31 * 2, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-1.0f, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ // Signed: valid range is [-2^31, 2^31).
+ loadConstantFloat32(two_31, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-two_31, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+// As above for double -> int32. The signed lower bound uses -2^31 - 1 with a
+// strict greater-than, which is exact in double precision.
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF64(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const double two_31 = -double(INT32_MIN);
+ ScratchDoubleScope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantDouble(two_31 * 2, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantDouble(-1.0, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ loadConstantDouble(two_31, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantDouble(-two_31 - 1, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+// As above for float32 -> int64.
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF32(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const float two_63 = -float(INT64_MIN);
+ ScratchFloat32Scope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantFloat32(two_63 * 2, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-1.0f, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ loadConstantFloat32(two_63, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantFloat32(-two_63, fpscratch);
+ ma_compareF32(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+
+// As above for double -> int64.
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ Label notNaN;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CompareIsNotNanF64(scratch, input, input);
+ ma_branch(&notNaN, Equal, scratch, Operand(1));
+ wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
+ bind(&notNaN);
+
+ Label isOverflow;
+ const double two_63 = -double(INT64_MIN);
+ ScratchDoubleScope fpscratch(*this);
+ if (flags & TRUNC_UNSIGNED) {
+ loadConstantDouble(two_63 * 2, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ loadConstantDouble(-1.0, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ } else {
+ loadConstantDouble(two_63, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThanOrEqual, input,
+ fpscratch);
+ ma_branch(&isOverflow, Equal, scratch, Operand(1));
+ // NOTE(review): this uses strict GreaterThan against -2^63, unlike the
+ // F32ToI64 variant above — inputs equal to -2^63 exactly should have
+ // converted inline, but confirm this path cannot reject them.
+ loadConstantDouble(-two_63, fpscratch);
+ ma_compareF64(scratch, Assembler::DoubleGreaterThan, input, fpscratch);
+ ma_b(scratch, Imm32(1), rejoin, Equal);
+ }
+ bind(&isOverflow);
+ wasmTrap(wasm::Trap::IntegerOverflow, off);
+}
+// Undo patchNopToCall: overwrite the 7-instruction sequence (6-instruction
+// 64-bit load + jalr) ending at `call` with nops.
+void MacroAssembler::patchCallToNop(uint8_t* call) {
+ uint32_t* p = reinterpret_cast<uint32_t*>(call) - 7;
+ *reinterpret_cast<Instr*>(p) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 1) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 2) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 3) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 4) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 5) = kNopByte;
+ *reinterpret_cast<Instr*>(p + 6) = kNopByte;
+}
+
+// Emit a patchable auipc+jalr pair. Initially it jumps to the instruction
+// after the pair (offset = 4); patchCall later rewrites both offsets to
+// reach the real callee. Returns the offset just past the pair.
+CodeOffset MacroAssembler::callWithPatch() {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 2);
+ DEBUG_PRINTF("\tcallWithPatch\n");
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ int32_t imm32 = 1 * sizeof(uint32_t);
+ int32_t Hi20 = ((imm32 + 0x800) >> 12);
+ int32_t Lo12 = imm32 << 20 >> 20;
+ auipc(scratch, Hi20); // Read PC + Hi20 into scratch.
+ jalr(scratch, Lo12); // jump PC + Hi20 + Lo12
+ DEBUG_PRINTF("\tret %d\n", currentOffset());
+ return CodeOffset(currentOffset());
+}
+
+// Retarget an auipc+jalr pair emitted by callWithPatch: split the pc-relative
+// offset into the auipc Hi20 and jalr Lo12 parts and rewrite both
+// instructions in place.
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ DEBUG_PRINTF("\tpatchCall\n");
+ BufferOffset call(callerOffset - 2 * sizeof(uint32_t));
+ DEBUG_PRINTF("\tcallerOffset %d\n", callerOffset);
+ int32_t offset = BufferOffset(calleeOffset).getOffset() - call.getOffset();
+ if (is_int32(offset)) {
+ Instruction* auipc_ = (Instruction*)editSrc(call);
+ Instruction* jalr_ = (Instruction*)editSrc(
+ BufferOffset(callerOffset - 1 * sizeof(uint32_t)));
+ DEBUG_PRINTF("\t%p %lu\n\t", auipc_, callerOffset - 2 * sizeof(uint32_t));
+ disassembleInstr(auipc_->InstructionBits());
+ DEBUG_PRINTF("\t%p %lu\n\t", jalr_, callerOffset - 1 * sizeof(uint32_t));
+ disassembleInstr(jalr_->InstructionBits());
+ DEBUG_PRINTF("\t\n");
+ MOZ_ASSERT(IsJalr(jalr_->InstructionBits()) &&
+ IsAuipc(auipc_->InstructionBits()));
+ MOZ_ASSERT(auipc_->RdValue() == jalr_->Rs1Value());
+ // +0x800 rounds Hi20 so the sign-extended Lo12 lands on the target.
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ instr_at_put(call, SetAuipcOffset(Hi20, auipc_->InstructionBits()));
+ instr_at_put(BufferOffset(callerOffset - 1 * sizeof(uint32_t)),
+ SetJalrOffset(Lo12, jalr_->InstructionBits()));
+ } else {
+ MOZ_CRASH();
+ }
+}
+
+// Fill in a far-jump literal: the 32-bit slot 4 instructions past the jump
+// (pre-initialized to UINT32_MAX) receives the relative target offset.
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(
+ editSrc(BufferOffset(farJump.offset() + 4 * kInstrSize)));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+ *u32 = targetOffset - farJump.offset();
+}
+
+// Patch a near address move to point at `target` (with value-check in debug
+// builds).
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+// Inverse of patchCallToNop: rewrite the 7 instructions ending at `call` as
+// a 6-instruction load of `target` into ScratchRegister followed by
+// jalr ra, ScratchRegister.
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+ uint32_t* p = reinterpret_cast<uint32_t*>(call) - 7;
+ Assembler::WriteLoad64Instructions((Instruction*)p, ScratchRegister,
+ (uint64_t)target);
+ DEBUG_PRINTF("\tpatchNopToCall %lu %lu\n", (uint64_t)target,
+ ExtractLoad64Value((Instruction*)p));
+ MOZ_ASSERT(ExtractLoad64Value((Instruction*)p) == (uint64_t)target);
+ Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
+ (ScratchRegister.code() << kRs1Shift) | (0x0 << kImm12Shift);
+ *reinterpret_cast<Instr*>(p + 6) = jalr_;
+}
+// Pop a GPR and record the frame shrinking by one machine word.
+void MacroAssembler::Pop(Register reg) {
+  ma_pop(reg);
+  adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+// Pop a floating-point register (stored in a 64-bit slot).
+void MacroAssembler::Pop(FloatRegister f) {
+  ma_pop(f);
+  adjustFrame(-int32_t(sizeof(double)));
+}
+
+// Pop a boxed JS Value.
+void MacroAssembler::Pop(const ValueOperand& val) {
+  popValue(val);
+  adjustFrame(-int32_t(sizeof(Value)));
+}
+
+// Reload the registers in |set| from the stack area created by
+// PushRegsInMask, skipping (but still stepping over the slots of) the
+// registers in |ignore|, then release the whole reserved area.
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+                                         LiveRegisterSet ignore) {
+  int32_t diff =
+      set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+  const int32_t reserved = diff;
+
+  // GPRs were stored above the FPRs; walk them in the same backward order.
+  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+    diff -= sizeof(intptr_t);
+    if (!ignore.has(*iter)) {
+      loadPtr(Address(StackPointer, diff), *iter);
+    }
+  }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+  for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+       iter.more(); ++iter) {
+    diff -= sizeof(double);
+    if (!ignore.has(*iter)) {
+      loadDouble(Address(StackPointer, diff), *iter);
+    }
+  }
+  // Every slot must have been accounted for.
+  MOZ_ASSERT(diff == 0);
+  freeStack(reserved);
+}
+
+// Spill the return address register (ra) to the stack.
+void MacroAssembler::pushReturnAddress() { push(ra); }
+
+// Reload the return address register (ra) from the stack.
+void MacroAssembler::popReturnAddress() { pop(ra); }
+// Restore the stack pointer from the word it currently points at (a
+// previously pushed SP value), and account for the consumed slot.
+void MacroAssembler::PopStackPtr() {
+  loadPtr(Address(StackPointer, 0), StackPointer);
+  adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+// Reserve one Value-sized slot and store |reg| into it boxed as a double,
+// recording the frame growth.
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+  subFromStackPtr(Imm32(sizeof(double)));
+  boxDouble(reg, Address(getStackPointer(), 0));
+  adjustFrame(sizeof(double));
+}
+
+// Push a GPR and record the frame growing by one machine word.
+void MacroAssembler::Push(Register reg) {
+  ma_push(reg);
+  adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+// Push a 32-bit immediate, materialized through a scratch register.
+void MacroAssembler::Push(const Imm32 imm) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_li(scratch, imm);
+  ma_push(scratch);
+  adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+// Push a word-sized immediate.
+void MacroAssembler::Push(const ImmWord imm) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_li(scratch, imm);
+  ma_push(scratch);
+  adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+// Push a pointer immediate; delegates to the ImmWord overload.
+void MacroAssembler::Push(const ImmPtr imm) {
+  Push(ImmWord(uintptr_t(imm.value)));
+}
+
+// Push a GC-traced pointer; ma_li(ImmGCPtr) records the data relocation.
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_li(scratch, ptr);
+  ma_push(scratch);
+  adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+// Push a floating-point register (64-bit slot).
+void MacroAssembler::Push(FloatRegister f) {
+  ma_push(f);
+  adjustFrame(int32_t(sizeof(double)));
+}
+
+// Reserve one contiguous area and store all registers in |set| into it:
+// GPRs in the upper part, FPRs below, each walked backward so the layout
+// matches PopRegsInMaskIgnore/storeRegsInMask.
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+  int32_t diff =
+      set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+  const int32_t reserved = diff;
+
+  reserveStack(reserved);
+  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+    diff -= sizeof(intptr_t);
+    storePtr(*iter, Address(StackPointer, diff));
+  }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+  for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+       iter.more(); ++iter) {
+    diff -= sizeof(double);
+    storeDouble(*iter, Address(StackPointer, diff));
+  }
+  // The reserved area must be exactly filled.
+  MOZ_ASSERT(diff == 0);
+}
+
+// Round |src| (float32) to the nearest int32 with ties rounding toward
+// +Infinity, branching to |fail| for NaN, -0, and out-of-range results.
+// |temp| is clobbered.
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+                                         FloatRegister temp, Label* fail) {
+  JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+  ScratchDoubleScope fscratch(*this);
+  Label negative, done;
+  // Branch to a slow path if input < 0.0 due to complicated rounding rules.
+  // Note that Fcmp with NaN unsets the negative flag.
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    fmv_x_w(scratch, src);
+    // 0x80000000 is the bit pattern of -0.0f; fmv_x_w sign-extends and so
+    // does the int32_t operand, so Equal detects negative zero exactly.
+    ma_branch(fail, Equal, scratch, Operand(int32_t(0x80000000)));
+    fmv_w_x(temp, zero);
+    ma_compareF32(scratch, DoubleLessThan, src, temp);
+    ma_branch(&negative, Equal, scratch, Operand(1));
+  }
+  // Handle the simple case of a positive input, and also -0 and NaN.
+  // Rounding proceeds with consideration of the fractional part of the input:
+  // 1. If > 0.5, round to integer with higher absolute value (so, up).
+  // 2. If < 0.5, round to integer with lower absolute value (so, down).
+  // 3. If = 0.5, round to +Infinity (so, up).
+  {
+    // Convert to signed 32-bit integer, rounding halfway cases away from zero.
+    // In the case of overflow, the output is saturated.
+    // In the case of NaN and -0, the output is zero.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    RoundFloatingPointToInteger(
+        dest, src, scratch,
+        [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+          masm->fcvt_w_s(dst, src, RMM);
+        },
+        false);
+    // The helper sets |scratch| to 1 on a clean conversion; bail otherwise.
+    ma_b(scratch, Imm32(1), fail, NotEqual);
+    jump(&done);
+  }
+
+  // Handle the complicated case of a negative input.
+  // Rounding proceeds with consideration of the fractional part of the input:
+  // 1. If > 0.5, round to integer with higher absolute value (so, down).
+  // 2. If < 0.5, round to integer with lower absolute value (so, up).
+  // 3. If = 0.5, round to +Infinity (so, up).
+  bind(&negative);
+  {
+    // Inputs in [-0.5, 0) need 0.5 added; other negative inputs need
+    // the biggest double less than 0.5.
+    // NOTE(review): GetBiggestNumberLessThan(0.5) is computed from a double
+    // literal and then narrowed by loadConstantFloat32 — confirm the
+    // narrowed value is still strictly less than 0.5f.
+    Label join;
+    loadConstantFloat32(GetBiggestNumberLessThan(0.5), temp);
+    loadConstantFloat32(-0.5, fscratch);
+    branchFloat(Assembler::DoubleLessThan, src, fscratch, &join);
+    loadConstantFloat32(0.5, temp);
+    bind(&join);
+    addFloat32(src, temp);
+    // Round all values toward -Infinity.
+    // In the case of overflow, the output is saturated.
+    // NaN and -0 are already handled by the "positive number" path above.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    RoundFloatingPointToInteger(
+        dest, temp, scratch,
+        [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+          masm->fcvt_w_s(dst, src, RDN);
+        },
+        false);
+    ma_b(scratch, Imm32(1), fail, NotEqual);
+    // If output is zero, then the actual result is -0. Fail.
+    branchTest32(Assembler::Zero, dest, dest, fail);
+  }
+  bind(&done);
+  JitSpew(JitSpew_Codegen, "]");
+}
+
+// Round |src| (double) to the nearest int32 with ties rounding toward
+// +Infinity, branching to |fail| for NaN, -0, and out-of-range results.
+// |temp| is clobbered. Mirrors roundFloat32ToInt32.
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+                                        FloatRegister temp, Label* fail) {
+  JitSpew(JitSpew_Codegen, "[ %s", __FUNCTION__);
+
+  ScratchDoubleScope fscratch(*this);
+  Label negative, done;
+  // Branch to a slow path if input < 0.0 due to complicated rounding rules.
+  // Note that Fcmp with NaN unsets the negative flag.
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    fmv_x_d(scratch, src);
+    // 0x8000000000000000 is the bit pattern of -0.0; fail immediately.
+    ma_branch(fail, Equal, scratch, Operand(0x8000000000000000));
+    fmv_d_x(temp, zero);
+    ma_compareF64(scratch, DoubleLessThan, src, temp);
+    ma_branch(&negative, Equal, scratch, Operand(1));
+  }
+  // Handle the simple case of a positive input, and also -0 and NaN.
+  // Rounding proceeds with consideration of the fractional part of the input:
+  // 1. If > 0.5, round to integer with higher absolute value (so, up).
+  // 2. If < 0.5, round to integer with lower absolute value (so, down).
+  // 3. If = 0.5, round to +Infinity (so, up).
+  {
+    // Convert to signed 32-bit integer, rounding halfway cases away from zero.
+    // In the case of overflow, the output is saturated.
+    // In the case of NaN and -0, the output is zero.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    RoundFloatingPointToInteger(
+        dest, src, scratch,
+        [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+          masm->fcvt_w_d(dst, src, RMM);
+        },
+        false);
+    // The helper sets |scratch| to 1 on a clean conversion; bail otherwise.
+    ma_b(scratch, Imm32(1), fail, NotEqual);
+    jump(&done);
+  }
+
+  // Handle the complicated case of a negative input.
+  // Rounding proceeds with consideration of the fractional part of the input:
+  // 1. If > 0.5, round to integer with higher absolute value (so, down).
+  // 2. If < 0.5, round to integer with lower absolute value (so, up).
+  // 3. If = 0.5, round to +Infinity (so, up).
+  bind(&negative);
+  {
+    // Inputs in [-0.5, 0) need 0.5 added; other negative inputs need
+    // the biggest double less than 0.5.
+    Label join;
+    loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+    loadConstantDouble(-0.5, fscratch);
+    branchDouble(Assembler::DoubleLessThan, src, fscratch, &join);
+    loadConstantDouble(0.5, temp);
+    bind(&join);
+    addDouble(src, temp);
+    // Round all values toward -Infinity.
+    // In the case of overflow, the output is saturated.
+    // NaN and -0 are already handled by the "positive number" path above.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    RoundFloatingPointToInteger(
+        dest, temp, scratch,
+        [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+          masm->fcvt_w_d(dst, src, RDN);
+        },
+        false);
+    ma_b(scratch, Imm32(1), fail, NotEqual);
+    // If output is zero, then the actual result is -0. Fail.
+    branchTest32(Assembler::Zero, dest, dest, fail);
+  }
+  bind(&done);
+  JitSpew(JitSpew_Codegen, "]");
+}
+
+// Begin a native ABI call when the current stack alignment is unknown:
+// save the old SP in |scratch|, round SP down to ABIStackAlignment, and
+// stash the old SP at [sp] so the call epilogue can restore it.
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+  MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
+  setupNativeABICall();
+  dynamicAlignment_ = true;
+
+  // or_ with zero is a register move: scratch := sp.
+  or_(scratch, StackPointer, zero);
+
+  // Force sp to be aligned
+  asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+  ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+  storePtr(scratch, Address(StackPointer, 0));
+}
+// pointer += (indexTemp32 << shift). Uses a scaled addressing mode when
+// |shift| maps to a hardware scale; otherwise shifts the index in place
+// (clobbering indexTemp32) and adds.
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+                                        Register pointer) {
+  if (IsShiftInScaleRange(shift)) {
+    computeEffectiveAddress(
+        BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+    return;
+  }
+  lshift32(Imm32(shift), indexTemp32);
+  addPtr(indexTemp32, pointer);
+}
+// Speculation barrier: not implemented for riscv64 (crashes if reached).
+void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
+// Store all registers in |set| into memory just below |dest| (offsets
+// decreasing), GPRs first then FPRs, mirroring PushRegsInMask's layout.
+// The scratch Register parameter is unused on this platform.
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+                                     Register) {
+  FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+  unsigned numFpu = fpuSet.size();
+  int32_t diffF = fpuSet.getPushSizeInBytes();
+  int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+  // The destination area must be large enough for every register slot.
+  MOZ_ASSERT(dest.offset >= diffG + diffF);
+
+  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+    diffG -= sizeof(intptr_t);
+    dest.offset -= sizeof(intptr_t);
+    storePtr(*iter, dest);
+  }
+  MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+  for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+    FloatRegister reg = *iter;
+    diffF -= reg.size();
+    numFpu -= 1;
+    dest.offset -= reg.size();
+    if (reg.isDouble()) {
+      storeDouble(reg, dest);
+    } else if (reg.isSingle()) {
+      storeFloat32(reg, dest);
+    } else {
+      MOZ_CRASH("Unknown register type.");
+    }
+  }
+  MOZ_ASSERT(numFpu == 0);
+  // Discard any alignment padding accounted for in getPushSizeInBytes.
+  diffF -= diffF % sizeof(uintptr_t);
+  MOZ_ASSERT(diffF == 0);
+}
+// Truncate |src| (double) toward zero into an int32, branching to |fail|
+// for NaN, -0, and out-of-range inputs.
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+                                        Label* fail) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Label zeroCase, done;
+  // Convert scalar to signed 32-bit fixed-point, rounding toward zero.
+  // In the case of overflow, the output is saturated.
+  // In the case of NaN and -0, the output is zero.
+  RoundFloatingPointToInteger(
+      dest, src, scratch,
+      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+        masm->fcvt_w_d(dst, src, RTZ);
+      },
+      false);
+  // The helper sets |scratch| to 1 on a clean conversion; bail otherwise.
+  ma_b(scratch, Imm32(1), fail, NotEqual);
+  // If the output was zero, worry about special cases.
+  branch32(Assembler::Equal, dest, Imm32(0), &zeroCase);
+  jump(&done);
+  // Handle the case of a zero output:
+  // 1. The input may have been NaN, requiring a failure.
+  // 2. The input may have been in (-1,-0], requiring a failure.
+  // 3. +0, return 0.
+  {
+    bind(&zeroCase);
+
+    // If input is a negative number that truncated to zero, the real
+    // output should be the non-integer -0.
+    // The use of "lt" instead of "lo" also catches unordered NaN input.
+    ScratchDoubleScope fscratch(*this);
+    fmv_d_x(fscratch, zero);
+    ma_compareF64(scratch, DoubleLessThan, src, fscratch);
+    ma_b(scratch, Imm32(1), fail, Equal);
+
+    // Check explicitly for -0, bitwise.
+    fmv_x_d(dest, src);
+    branchTestPtr(Assembler::Signed, dest, dest, fail);
+    movePtr(ImmWord(0), dest);
+  }
+
+  bind(&done);
+}
+// Truncate |src| (float32) toward zero into an int32, branching to |fail|
+// for NaN, -0, and out-of-range inputs. Mirrors truncDoubleToInt32.
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+                                         Label* fail) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Label zeroCase, done;
+  // Convert scalar to signed 32-bit fixed-point, rounding toward zero.
+  // In the case of overflow, the output is saturated.
+  // In the case of NaN and -0, the output is zero.
+  RoundFloatingPointToInteger(
+      dest, src, scratch,
+      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
+        masm->fcvt_w_s(dst, src, RTZ);
+      },
+      false);
+  // The helper sets |scratch| to 1 on a clean conversion; bail otherwise.
+  ma_b(scratch, Imm32(1), fail, NotEqual);
+  // If the output was zero, worry about special cases.
+  branch32(Assembler::Equal, dest, Imm32(0), &zeroCase);
+  jump(&done);
+  // Handle the case of a zero output:
+  // 1. The input may have been NaN, requiring a failure.
+  // 2. The input may have been in (-1,-0], requiring a failure.
+  // 3. +0, return 0.
+  {
+    bind(&zeroCase);
+
+    // If input is a negative number that truncated to zero, the real
+    // output should be the non-integer -0.
+    // The use of "lt" instead of "lo" also catches unordered NaN input.
+    ScratchDoubleScope fscratch(*this);
+    fmv_w_x(fscratch, zero);
+    ma_compareF32(scratch, DoubleLessThan, src, fscratch);
+    ma_b(scratch, Imm32(1), fail, Equal);
+
+    // Check explicitly for -0, bitwise.
+    fmv_x_w(dest, src);
+    branchTestPtr(Assembler::Signed, dest, dest, fail);
+    movePtr(ImmWord(0), dest);
+  }
+
+  bind(&done);
+}
+// Wasm atomic read-modify-write performed for its effect only (the old
+// value is discarded); the three temps drive the sub-word path inside
+// AtomicEffectOp. The access descriptor is recorded for trap metadata.
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+                                        AtomicOp op, Register value,
+                                        const Address& mem, Register valueTemp,
+                                        Register offsetTemp,
+                                        Register maskTemp) {
+  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                 valueTemp, offsetTemp, maskTemp);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+                                        AtomicOp op, Register value,
+                                        const BaseIndex& mem,
+                                        Register valueTemp, Register offsetTemp,
+                                        Register maskTemp) {
+  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                 valueTemp, offsetTemp, maskTemp);
+}
+// Shared helper for the 64-bit wasm atomic-exchange overloads below;
+// forwards to AtomicExchange64 with the wasm access recorded.
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+                                 const wasm::MemoryAccessDesc& access,
+                                 const T& mem, Register64 value,
+                                 Register64 output) {
+  AtomicExchange64(masm, &access, access.sync(), mem, value, output);
+}
+
+// 64-bit wasm atomic exchange; |output| receives the previous value.
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+                                          const Address& mem, Register64 src,
+                                          Register64 output) {
+  WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+                                          const BaseIndex& mem, Register64 src,
+                                          Register64 output) {
+  WasmAtomicExchange64(*this, access, mem, src, output);
+}
+// 8/16/32-bit wasm atomic exchange; temps drive the sub-word LR/SC path.
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+                                        const Address& mem, Register value,
+                                        Register valueTemp, Register offsetTemp,
+                                        Register maskTemp, Register output) {
+  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+                 valueTemp, offsetTemp, maskTemp, output);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+                                        const BaseIndex& mem, Register value,
+                                        Register valueTemp, Register offsetTemp,
+                                        Register maskTemp, Register output) {
+  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+                 valueTemp, offsetTemp, maskTemp, output);
+}
+// 64-bit wasm atomic fetch-op; |output| receives the previous value.
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+                                         AtomicOp op, Register64 value,
+                                         const Address& mem, Register64 temp,
+                                         Register64 output) {
+  AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+                                         AtomicOp op, Register64 value,
+                                         const BaseIndex& mem, Register64 temp,
+                                         Register64 output) {
+  AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+// JS (non-wasm) 64-bit atomic fetch-op: no access descriptor is recorded.
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+                                     Register64 value, const Address& mem,
+                                     Register64 temp, Register64 output) {
+  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+                                     Register64 value, const BaseIndex& mem,
+                                     Register64 temp, Register64 output) {
+  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+// JS 64-bit atomic op for effect only: the fetched value is written into
+// |temp| (used as a dummy output) and discarded.
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+                                      Register64 value, const Address& mem,
+                                      Register64 temp) {
+  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+                                      Register64 value, const BaseIndex& mem,
+                                      Register64 temp) {
+  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+// 8/16/32-bit wasm atomic fetch-op; temps drive the sub-word LR/SC path
+// and |output| receives the previous (extended) value.
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+                                       AtomicOp op, Register value,
+                                       const Address& mem, Register valueTemp,
+                                       Register offsetTemp, Register maskTemp,
+                                       Register output) {
+  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                valueTemp, offsetTemp, maskTemp, output);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+                                       AtomicOp op, Register value,
+                                       const BaseIndex& mem, Register valueTemp,
+                                       Register offsetTemp, Register maskTemp,
+                                       Register output) {
+  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                valueTemp, offsetTemp, maskTemp, output);
+}
+// Branch to |ok| when the 32-bit bounds check (index cond limit) passes.
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+                                       Register boundsCheckLimit, Label* ok) {
+  ma_b(index, boundsCheckLimit, ok, cond);
+}
+
+// Memory-resident limit: load it into a scratch register first.
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+                                       Address boundsCheckLimit, Label* ok) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  load32(boundsCheckLimit, scratch2);
+  ma_b(index, Register(scratch2), ok, cond);
+}
+
+// Branch to |ok| when the 64-bit bounds check (index cond limit) passes.
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+                                       Register64 boundsCheckLimit, Label* ok) {
+  ma_b(index.reg, boundsCheckLimit.reg, ok, cond);
+}
+
+// Memory-resident limit: load it into a scratch register first.
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+                                       Address boundsCheckLimit, Label* ok) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  loadPtr(boundsCheckLimit, scratch2);
+  ma_b(index.reg, scratch2, ok, cond);
+}
+
+// 64-bit wasm compare-exchange; |output| receives the previous value.
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+                                           const Address& mem,
+                                           Register64 expect,
+                                           Register64 replace,
+                                           Register64 output) {
+  CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+                    output);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+                                           const BaseIndex& mem,
+                                           Register64 expect,
+                                           Register64 replace,
+                                           Register64 output) {
+  CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+                    output);
+}
+
+// Emit a compare-exchange on an 8/16/32-bit cell using LR.W/SC.W. The
+// 32-bit case operates on the cell directly (temps must be InvalidReg);
+// the sub-word cases align the address down to a word boundary and
+// extract/insert the byte or halfword using the shift/mask temps.
+// |output| receives the old value, sign- or zero-extended per |type|.
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+                            const wasm::MemoryAccessDesc* access,
+                            Scalar::Type type, const Synchronization& sync,
+                            const T& mem, Register oldval, Register newval,
+                            Register valueTemp, Register offsetTemp,
+                            Register maskTemp, Register output) {
+  bool signExtend = Scalar::isSignedIntType(type);
+  unsigned nbytes = Scalar::byteSize(type);
+
+  switch (nbytes) {
+    case 1:
+    case 2:
+      break;
+    case 4:
+      // Word-sized access needs no shift/mask temps.
+      MOZ_ASSERT(valueTemp == InvalidReg);
+      MOZ_ASSERT(offsetTemp == InvalidReg);
+      MOZ_ASSERT(maskTemp == InvalidReg);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  Label again, end;
+  UseScratchRegisterScope temps(&masm);
+  Register SecondScratchReg = temps.Acquire();
+  masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+  if (nbytes == 4) {
+    // Word-sized: LR/SC directly on the cell.
+    masm.memoryBarrierBefore(sync);
+    masm.bind(&again);
+
+    if (access) {
+      masm.append(*access, masm.size());
+    }
+
+    masm.lr_w(true, true, output, SecondScratchReg);
+    // NOTE(review): lr.w sign-extends; this compare assumes |oldval| also
+    // holds a sign-extended 32-bit value — confirm at callers.
+    masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
+    masm.mv(ScratchRegister, newval);
+    // sc.w writes 0 to ScratchRegister on success, nonzero on failure.
+    masm.sc_w(true, true, ScratchRegister, SecondScratchReg, ScratchRegister);
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::NonZero,
+              ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+    masm.bind(&end);
+
+    return;
+  }
+
+  // Split the address into a word-aligned base and a byte offset.
+  masm.andi(offsetTemp, SecondScratchReg, 3);
+  masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+  masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+  // offsetTemp := bit offset of the cell within its word.
+  masm.slli(offsetTemp, offsetTemp, 3);
+  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+  masm.sll(maskTemp, maskTemp, offsetTemp);
+  // maskTemp := ~(cell mask in place), used to clear the old bytes.
+  masm.nor(maskTemp, zero, maskTemp);
+
+  masm.memoryBarrierBefore(sync);
+
+  masm.bind(&again);
+
+  if (access) {
+    masm.append(*access, masm.size());
+  }
+
+  masm.lr_w(true, true, ScratchRegister, SecondScratchReg);
+
+  // Move the cell's bits to the low end for comparison.
+  masm.srl(output, ScratchRegister, offsetTemp);
+
+  switch (nbytes) {
+    case 1:
+      if (signExtend) {
+        masm.SignExtendByte(valueTemp, oldval);
+        masm.SignExtendByte(output, output);
+      } else {
+        masm.andi(valueTemp, oldval, 0xff);
+        masm.andi(output, output, 0xff);
+      }
+      break;
+    case 2:
+      if (signExtend) {
+        masm.SignExtendShort(valueTemp, oldval);
+        masm.SignExtendShort(output, output);
+      } else {
+        // NOTE(review): 0xffff exceeds the 12-bit signed immediate of the
+        // base RV64I andi instruction — confirm this andi is a macro that
+        // materializes wide immediates.
+        masm.andi(valueTemp, oldval, 0xffff);
+        masm.andi(output, output, 0xffff);
+      }
+      break;
+  }
+
+  masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
+
+  // Insert the new bytes into the loaded word and try to commit it.
+  masm.sll(valueTemp, newval, offsetTemp);
+  masm.and_(ScratchRegister, ScratchRegister, maskTemp);
+  masm.or_(ScratchRegister, ScratchRegister, valueTemp);
+  masm.sc_w(true, true, ScratchRegister, SecondScratchReg, ScratchRegister);
+
+  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::NonZero,
+            ShortJump);
+
+  masm.memoryBarrierAfter(sync);
+
+  masm.bind(&end);
+}
+
+// JS atomics compare-exchange (no wasm access descriptor recorded).
+void MacroAssembler::compareExchange(Scalar::Type type,
+                                     const Synchronization& sync,
+                                     const Address& mem, Register oldval,
+                                     Register newval, Register valueTemp,
+                                     Register offsetTemp, Register maskTemp,
+                                     Register output) {
+  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+                  offsetTemp, maskTemp, output);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::compareExchange(Scalar::Type type,
+                                     const Synchronization& sync,
+                                     const BaseIndex& mem, Register oldval,
+                                     Register newval, Register valueTemp,
+                                     Register offsetTemp, Register maskTemp,
+                                     Register output) {
+  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+                  offsetTemp, maskTemp, output);
+}
+
+// Wasm compare-exchange: records |access| so faults map to wasm traps.
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+                                         const Address& mem, Register oldval,
+                                         Register newval, Register valueTemp,
+                                         Register offsetTemp, Register maskTemp,
+                                         Register output) {
+  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+                  newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+// BaseIndex-addressed variant of the above.
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+                                         const BaseIndex& mem, Register oldval,
+                                         Register newval, Register valueTemp,
+                                         Register offsetTemp, Register maskTemp,
+                                         Register output) {
+  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+                  newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+// Wasm load into an AnyRegister; delegates to the platform impl with no
+// tls/instance register (InvalidReg).
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+                              Register memoryBase, Register ptr,
+                              Register ptrScratch, AnyRegister output) {
+  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+// Wasm 64-bit load; delegates to the platform impl.
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+                                 Register memoryBase, Register ptr,
+                                 Register ptrScratch, Register64 output) {
+  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+// Wasm store from an AnyRegister; delegates to the platform impl.
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+                               AnyRegister value, Register memoryBase,
+                               Register ptr, Register ptrScratch) {
+  wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+// Wasm 64-bit store; delegates to the platform impl.
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+                                  Register64 value, Register memoryBase,
+                                  Register ptr, Register ptrScratch) {
+  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+// If |fs| (double) is NaN (feq fs,fs yields 0), clear |rd| to zero; used
+// so saturating truncations return 0 for NaN inputs.
+void MacroAssemblerRiscv64::Clear_if_nan_d(Register rd, FPURegister fs) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Label no_nan;
+  feq_d(ScratchRegister, fs, fs);
+  bnez(ScratchRegister, &no_nan);
+  mv(rd, zero_reg);
+  bind(&no_nan);
+}
+
+// Single-precision counterpart of Clear_if_nan_d.
+void MacroAssemblerRiscv64::Clear_if_nan_s(Register rd, FPURegister fs) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Label no_nan;
+  feq_s(ScratchRegister, fs, fs);
+  bnez(ScratchRegister, &no_nan);
+  mv(rd, zero_reg);
+  bind(&no_nan);
+}
+
+// Wasm double -> int32 truncation. Saturating form keeps the saturated
+// result and forces NaN to 0; otherwise an unsuccessful conversion
+// (scratch flag != 1) branches to |oolEntry|.
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+                                               Register output,
+                                               bool isSaturating,
+                                               Label* oolEntry) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_w_d(output, input, ScratchRegister);
+  if (isSaturating) {
+    Clear_if_nan_d(output, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// Wasm double -> int64 truncation. The saturating path binds |oolRejoin|
+// (the OOL code jumps back here) before fixing up NaN to 0.
+void MacroAssembler::wasmTruncateDoubleToInt64(
+    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+    Label* oolRejoin, FloatRegister tempDouble) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_l_d(output.reg, input, ScratchRegister);
+  if (isSaturating) {
+    bind(oolRejoin);
+    Clear_if_nan_d(output.reg, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// Wasm double -> uint32 truncation; see wasmTruncateDoubleToInt32.
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+                                                Register output,
+                                                bool isSaturating,
+                                                Label* oolEntry) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_uw_d(output, input, ScratchRegister);
+  if (isSaturating) {
+    Clear_if_nan_d(output, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// Wasm double -> uint64 truncation; see wasmTruncateDoubleToInt64.
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+    Label* oolRejoin, FloatRegister tempDouble) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_ul_d(output.reg, input, ScratchRegister);
+  if (isSaturating) {
+    bind(oolRejoin);
+    Clear_if_nan_d(output.reg, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// Wasm float32 -> int32 truncation; see wasmTruncateDoubleToInt32.
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+                                                Register output,
+                                                bool isSaturating,
+                                                Label* oolEntry) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_w_s(output, input, ScratchRegister);
+  if (isSaturating) {
+    Clear_if_nan_s(output, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// Wasm float32 -> int64 truncation; see wasmTruncateDoubleToInt64.
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+    Label* oolRejoin, FloatRegister tempFloat) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_l_s(output.reg, input, ScratchRegister);
+
+  if (isSaturating) {
+    bind(oolRejoin);
+    Clear_if_nan_s(output.reg, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// Wasm float32 -> uint32 truncation; see wasmTruncateDoubleToInt32.
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+                                                 Register output,
+                                                 bool isSaturating,
+                                                 Label* oolEntry) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_uw_s(output, input, ScratchRegister);
+  if (isSaturating) {
+    Clear_if_nan_s(output, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// Wasm float32 -> uint64 truncation; see wasmTruncateDoubleToInt64.
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+    Label* oolRejoin, FloatRegister tempFloat) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  Trunc_ul_s(output.reg, input, ScratchRegister);
+
+  if (isSaturating) {
+    bind(oolRejoin);
+    Clear_if_nan_s(output.reg, input);
+  } else {
+    ma_b(ScratchRegister, Imm32(1), oolEntry, Assembler::NotEqual);
+  }
+}
+
+// TODO(riscv64): widenInt32 should be nop?
+// RV64 keeps 32-bit values sign-extended in 64-bit registers; this
+// re-establishes that invariant explicitly.
+void MacroAssembler::widenInt32(Register r) {
+  move32To64SignExtend(r, Register64(r));
+}
+
+//}}} check_macroassembler_style
+
+// These methods emit a fixed-length instruction block (via li_constant or
+// li_ptr) materializing an immediate, so the value can be modified later —
+// either during compilation (eg. Assembler::bind), or during execution
+// (eg. jit::PatchJump).
+void MacroAssemblerRiscv64::ma_liPatchable(Register dest, Imm32 imm) {
+  return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerRiscv64::ma_liPatchable(Register dest, ImmPtr imm) {
+  return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerRiscv64::ma_liPatchable(Register dest, ImmWord imm,
+                                           LiFlags flags) {
+  DEBUG_PRINTF("\tma_liPatchable\n");
+  if (Li64 == flags) {
+    // Full-width form: can later be patched with any 64-bit constant.
+    li_constant(dest, imm.value);
+  } else {
+    // Shorter pointer form — presumably sufficient for code/heap
+    // addresses; confirm against li_ptr's supported range.
+    li_ptr(dest, imm.value);
+  }
+}
+
+// Load a GC pointer: records a data relocation and uses the patchable
+// form so the GC can update the embedded pointer.
+void MacroAssemblerRiscv64::ma_li(Register dest, ImmGCPtr ptr) {
+  BlockTrampolinePoolScope block_trampoline_pool(this, 6);
+  writeDataRelocation(ptr);
+  ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+// Load a 32-bit immediate with the shortest available sequence.
+void MacroAssemblerRiscv64::ma_li(Register dest, Imm32 imm) {
+  RV_li(dest, imm.value);
+}
+// Load a 64-bit immediate with the shortest available sequence.
+void MacroAssemblerRiscv64::ma_li(Register dest, Imm64 imm) {
+  RV_li(dest, imm.value);
+}
+// Load the (not yet known) address of |label|: emit a patchable
+// placeholder and link it so binding the label fills in the address.
+void MacroAssemblerRiscv64::ma_li(Register dest, CodeLabel* label) {
+  DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+  BlockTrampolinePoolScope block_trampoline_pool(this, 7);
+  BufferOffset bo = m_buffer.nextOffset();
+  JitSpew(JitSpew_Codegen, ".load CodeLabel %p", label);
+  ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+  label->patchAt()->bind(bo.getOffset());
+  label->setLinkMode(CodeLabel::MoveImmediate);
+  DEBUG_PRINTF("]\n");
+}
+// Load a word-sized immediate.
+void MacroAssemblerRiscv64::ma_li(Register dest, ImmWord imm) {
+  RV_li(dest, imm.value);
+}
+
+// Pop one 64-bit word from the stack into |r|.
+void MacroAssemblerRiscv64::ma_pop(Register r) {
+  ld(r, StackPointer, 0);
+  addi(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+// Push |r| (one 64-bit word). Pushing sp itself needs a copy of the
+// pre-decrement value, since sp is adjusted before the store.
+void MacroAssemblerRiscv64::ma_push(Register r) {
+  if (r == sp) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    // Pushing sp requires one more instruction.
+    mv(ScratchRegister, sp);
+    r = ScratchRegister;
+  }
+
+  addi(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+  sd(r, StackPointer, 0);
+}
+
+// 32-bit multiply helpers that branch to |overflow| when the signed
+// product does not fit in an int32 (detected via MulOverflow32).
+void MacroAssemblerRiscv64::ma_mul32TestOverflow(Register rd, Register rj,
+                                                 Register rk, Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  MulOverflow32(rd, rj, rk, ScratchRegister);
+  ma_b(ScratchRegister, Register(zero), overflow, Assembler::NotEqual);
+}
+// Immediate-operand variant of the above.
+void MacroAssemblerRiscv64::ma_mul32TestOverflow(Register rd, Register rj,
+                                                 Imm32 imm, Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register ScratchRegister = temps.Acquire();
+  MulOverflow32(rd, rj, Operand(imm.value), ScratchRegister);
+  ma_b(ScratchRegister, Register(zero), overflow, Assembler::NotEqual);
+}
+
+// Pointer-sized multiply rd = rj * rk, branching to |overflow| if the
+// signed product does not fit in 64 bits: the high half (mulh) must equal
+// the sign-replication of the low half.
+void MacroAssemblerRiscv64::ma_mulPtrTestOverflow(Register rd, Register rj,
+                                                  Register rk,
+                                                  Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(rd != scratch);
+
+  // Preserve any operand aliasing rd, since mul clobbers rd first.
+  if (rd == rj) {
+    or_(scratch, rj, zero);
+    rj = scratch;
+    rk = (rd == rk) ? rj : rk;
+  } else if (rd == rk) {
+    or_(scratch, rk, zero);
+    rk = scratch;
+  }
+
+  mul(rd, rj, rk);
+  // High 64 bits of the 128-bit product.
+  mulh(scratch, rj, rk);
+  // Sign-replication of the low half; must match mulh if no overflow.
+  srai(scratch2, rd, 63);
+  ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
+
+// MulOverflow32 sets the |overflow| register to zero if no overflow
+// occurred: both operands are sign-extended to 64 bits and multiplied;
+// the product overflows int32 iff it differs from its own 32-bit
+// sign-extension. |dst| receives the sign-extended 32-bit product.
+void MacroAssemblerRiscv64::MulOverflow32(Register dst, Register left,
+                                          const Operand& right,
+                                          Register overflow) {
+  UseScratchRegisterScope temps(this);
+  BlockTrampolinePoolScope block_trampoline_pool(this, 11);
+  Register right_reg;
+  Register scratch = temps.Acquire();
+  Register scratch2 = temps.Acquire();
+  if (right.is_imm()) {
+    ma_li(scratch, right.immediate());
+    right_reg = scratch;
+  } else {
+    MOZ_ASSERT(right.is_reg());
+    right_reg = right.rm();
+  }
+
+  MOZ_ASSERT(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+             overflow != scratch2);
+  MOZ_ASSERT(overflow != left && overflow != right_reg);
+  sext_w(overflow, left);
+  sext_w(scratch2, right_reg);
+
+  mul(overflow, overflow, scratch2);
+  sext_w(dst, overflow);
+  // Nonzero iff the 64-bit product differs from its int32 truncation.
+  xor_(overflow, overflow, dst);
+}
+
+// Resolve a branch offset: when a Label is supplied, ask the assembler for
+// the (possibly bound) distance; otherwise validate the caller's raw offset
+// against the encoding width |bits|.
+int32_t MacroAssemblerRiscv64::GetOffset(int32_t offset, Label* L,
+                                         OffsetSize bits) {
+  if (L) {
+    offset = branch_offset_helper(L, bits);
+  } else {
+    MOZ_ASSERT(is_intn(offset, bits));
+  }
+  return offset;
+}
+
+// Returns false (and leaves *offset untouched) when the label is too far
+// away to be reached by a |bits|-wide branch encoding.
+bool MacroAssemblerRiscv64::CalculateOffset(Label* L, int32_t* offset,
+                                            OffsetSize bits) {
+  if (!is_near(L, bits)) return false;
+  *offset = GetOffset(*offset, L, bits);
+  return true;
+}
+
+// Unconditional short branch: a single JAL with a 21-bit offset.
+void MacroAssemblerRiscv64::BranchShortHelper(int32_t offset, Label* L) {
+  MOZ_ASSERT(L == nullptr || offset == 0);
+  offset = GetOffset(offset, L, OffsetSize::kOffset21);
+  Assembler::j(offset);
+}
+
+// Conditional short branch. Returns false when the target is out of range
+// for the short encodings (13-bit B-type, 21-bit J-type), in which case the
+// caller must fall back to a long branch. Immediate right-hand operands are
+// materialized into a scratch register. When both operands are the same
+// register, statically-true conditions degrade to an unconditional jump and
+// statically-false ones emit no code at all.
+bool MacroAssemblerRiscv64::BranchShortHelper(int32_t offset, Label* L,
+                                              Condition cond, Register rs,
+                                              const Operand& rt) {
+  MOZ_ASSERT(L == nullptr || offset == 0);
+  MOZ_ASSERT(rt.is_reg() || rt.is_imm());
+  UseScratchRegisterScope temps(this);
+  Register scratch = Register();
+  if (rt.is_imm()) {
+    scratch = temps.Acquire();
+    ma_li(scratch, Imm64(rt.immediate()));
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    scratch = rt.rm();
+  }
+  BlockTrampolinePoolScope block_trampoline_pool(this, 2);
+  {
+    switch (cond) {
+      case Always:
+        if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+        Assembler::j(offset);
+        EmitConstPoolWithJumpIfNeeded();
+        break;
+      case Equal:
+        // rs == rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+          Assembler::j(offset);
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::beq(rs, scratch, offset);
+        }
+        break;
+      case NotEqual:
+        // rs != rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          break;  // No code needs to be emitted
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::bne(rs, scratch, offset);
+        }
+        break;
+
+      // Signed comparison.
+      case GreaterThan:
+        // rs > rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          break;  // No code needs to be emitted.
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::bgt(rs, scratch, offset);
+        }
+        break;
+      case GreaterThanOrEqual:
+        // rs >= rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+          Assembler::j(offset);
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::bge(rs, scratch, offset);
+        }
+        break;
+      case LessThan:
+        // rs < rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          break;  // No code needs to be emitted.
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::blt(rs, scratch, offset);
+        }
+        break;
+      case LessThanOrEqual:
+        // rs <= rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+          Assembler::j(offset);
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::ble(rs, scratch, offset);
+        }
+        break;
+
+      // Unsigned comparison.
+      case Above:
+        // rs > rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          break;  // No code needs to be emitted.
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::bgtu(rs, scratch, offset);
+        }
+        break;
+      case AboveOrEqual:
+        // rs >= rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+          Assembler::j(offset);
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::bgeu(rs, scratch, offset);
+        }
+        break;
+      case Below:
+        // rs < rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          break;  // No code needs to be emitted.
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          bltu(rs, scratch, offset);
+        }
+        break;
+      case BelowOrEqual:
+        // rs <= rt
+        if (rt.is_reg() && rs == rt.rm()) {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+          Assembler::j(offset);
+        } else {
+          if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+          Assembler::bleu(rs, scratch, offset);
+        }
+        break;
+      default:
+        MOZ_CRASH("UNREACHABLE");
+    }
+  }
+  return true;
+}
+
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+// NOTE(review): rt.rm() is evaluated even for immediate operands here; this
+// is only reached in debug builds inside MOZ_ASSERT — confirm Operand::rm()
+// tolerates immediates if this ever fires with an imm rt.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) \
+  MOZ_ASSERT((cond == Always && rs == zero && rt.rm() == zero) || \
+             (cond != Always && (rs != zero || rt.rm() != zero)))
+
+// Validate arguments, then emit a short branch either to a raw offset
+// (must fit the 13-bit B-type range) or to a label.
+bool MacroAssemblerRiscv64::BranchShortCheck(int32_t offset, Label* L,
+                                             Condition cond, Register rs,
+                                             const Operand& rt) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+
+  if (!L) {
+    MOZ_ASSERT(is_int13(offset));
+    return BranchShortHelper(offset, nullptr, cond, rs, rt);
+  } else {
+    MOZ_ASSERT(offset == 0);
+    return BranchShortHelper(0, L, cond, rs, rt);
+  }
+}
+
+// Convenience wrappers over BranchShortCheck / BranchShortHelper.
+void MacroAssemblerRiscv64::BranchShort(Label* L) { BranchShortHelper(0, L); }
+
+void MacroAssemblerRiscv64::BranchShort(int32_t offset, Condition cond,
+                                        Register rs, const Operand& rt) {
+  BranchShortCheck(offset, nullptr, cond, rs, rt);
+}
+
+void MacroAssemblerRiscv64::BranchShort(Label* L, Condition cond, Register rs,
+                                        const Operand& rt) {
+  BranchShortCheck(0, L, cond, rs, rt);
+}
+
+// Unconditional long branch with a PC-relative sequence (auipc+jalr via a
+// scratch register), reaching targets beyond the 21-bit JAL range.
+void MacroAssemblerRiscv64::BranchLong(Label* L) {
+  // Generate position independent long branch.
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  int32_t imm;
+  imm = branch_long_offset(L);
+  GenPCRelativeJump(scratch, imm);
+}
+
+// Same as BranchLong, but also links (writes the return address).
+void MacroAssemblerRiscv64::BranchAndLinkLong(Label* L) {
+  // Generate position independent long branch and link.
+  int32_t imm;
+  imm = branch_long_offset(L);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  GenPCRelativeJumpAndLink(scratch, imm);
+}
+
+// Top-level conditional branch dispatcher. Prefers the short encodings; when
+// the target is (or may be) out of short range, inverts the condition to
+// skip over an unconditional long branch.
+void MacroAssemblerRiscv64::ma_branch(Label* L, Condition cond, Register rs,
+                                      const Operand& rt, JumpKind jumpKind) {
+  if (L->used()) {
+    // Label already has uses: try the short form first; on range failure
+    // fall through to the invert-and-long-jump pattern.
+    if (jumpKind == ShortJump && BranchShortCheck(0, L, cond, rs, rt)) {
+      return;
+    }
+    if (cond != Always) {
+      Label skip;
+      Condition neg_cond = InvertCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      BranchLong(L);
+      bind(&skip);
+    } else {
+      BranchLong(L);
+      EmitConstPoolWithJumpIfNeeded();
+    }
+  } else {
+    if (jumpKind == LongJump) {
+      if (cond != Always) {
+        Label skip;
+        Condition neg_cond = InvertCondition(cond);
+        BranchShort(&skip, neg_cond, rs, rt);
+        BranchLong(L);
+        bind(&skip);
+      } else {
+        BranchLong(L);
+        EmitConstPoolWithJumpIfNeeded();
+      }
+    } else {
+      BranchShort(L, cond, rs, rt);
+    }
+  }
+}
+
+// Branches when done from within riscv code.
+// Compare |lhs| against the 64-bit value loaded from |addr|.
+void MacroAssemblerRiscv64::ma_b(Register lhs, Address addr, Label* label,
+                                 Condition c, JumpKind jumpKind) {
+  ScratchRegisterScope scratch(asMasm());
+  MOZ_ASSERT(lhs != scratch);
+  ma_load(scratch, addr, SizeDouble);
+  ma_b(lhs, Register(scratch), label, c, jumpKind);
+}
+
+// Pointer-immediate comparison: forwards to the ImmWord overload.
+void MacroAssemblerRiscv64::ma_b(Register lhs, ImmPtr imm, Label* l,
+                                 Condition c, JumpKind jumpKind) {
+  asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+// Branches when done from within riscv64-specific code.
+// (Comment previously said "loongarch" — this file was ported from that
+// backend.) Materializes the 64-bit immediate into a scratch register.
+void MacroAssemblerRiscv64::ma_b(Register lhs, ImmWord imm, Label* label,
+                                 Condition c, JumpKind jumpKind) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  MOZ_ASSERT(lhs != scratch);
+  ma_li(scratch, imm);
+  ma_b(lhs, Register(scratch), label, c, jumpKind);
+}
+
+// 32-bit immediate comparison. Zero-test conditions against imm 0 are
+// special-cased to avoid materializing the constant.
+void MacroAssemblerRiscv64::ma_b(Register lhs, Imm32 imm, Label* label,
+                                 Condition c, JumpKind jumpKind) {
+  if ((c == NonZero || c == Zero || c == Signed || c == NotSigned) &&
+      imm.value == 0) {
+    ma_b(lhs, lhs, label, c, jumpKind);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    MOZ_ASSERT(lhs != scratch);
+    ma_li(scratch, imm);
+    ma_b(lhs, Register(scratch), label, c, jumpKind);
+  }
+}
+
+// Compare a value loaded from memory against a 32-bit immediate.
+void MacroAssemblerRiscv64::ma_b(Address addr, Imm32 imm, Label* label,
+                                 Condition c, JumpKind jumpKind) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  ma_load(scratch2, addr);
+  ma_b(Register(scratch2), imm, label, c, jumpKind);
+}
+
+// Register-register branch. Unary conditions (Zero/NonZero/Signed/NotSigned)
+// require lhs == rhs and are lowered to a comparison against the zero
+// register; everything else is forwarded to ma_branch unchanged.
+void MacroAssemblerRiscv64::ma_b(Register lhs, Register rhs, Label* label,
+                                 Condition c, JumpKind jumpKind) {
+  switch (c) {
+    case Equal:
+    case NotEqual:
+      ma_branch(label, c, lhs, rhs, jumpKind);
+      break;
+    case Always:
+      ma_branch(label, c, zero, Operand(zero), jumpKind);
+      break;
+    case Zero:
+      MOZ_ASSERT(lhs == rhs);
+      ma_branch(label, Equal, lhs, Operand(zero), jumpKind);
+      break;
+    case NonZero:
+      MOZ_ASSERT(lhs == rhs);
+      ma_branch(label, NotEqual, lhs, Operand(zero), jumpKind);
+      break;
+    case Signed:
+      // Signed means "negative": lhs < 0.
+      MOZ_ASSERT(lhs == rhs);
+      ma_branch(label, LessThan, lhs, Operand(zero), jumpKind);
+      break;
+    case NotSigned:
+      MOZ_ASSERT(lhs == rhs);
+      ma_branch(label, GreaterThanOrEqual, lhs, Operand(zero), jumpKind);
+      break;
+    default: {
+      ma_branch(label, c, lhs, rhs, jumpKind);
+      break;
+    }
+  }
+}
+
+// Extract the |size|-bit field starting at bit |pos| of rs into rt,
+// optionally sign-extending it: shift the field up to the top of the
+// register, then shift back down arithmetically or logically.
+void MacroAssemblerRiscv64::ExtractBits(Register rt, Register rs, uint16_t pos,
+                                        uint16_t size, bool sign_extend) {
+#if JS_CODEGEN_RISCV64
+  MOZ_ASSERT(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
+             pos + size <= 64);
+  slli(rt, rs, 64 - (pos + size));
+  if (sign_extend) {
+    srai(rt, rt, 64 - size);
+  } else {
+    srli(rt, rt, 64 - size);
+  }
+#elif JS_CODEGEN_RISCV32
+  MOZ_ASSERT(pos < 32);
+  MOZ_ASSERT(size > 0);
+  MOZ_ASSERT(size <= 32);
+  MOZ_ASSERT((pos + size) > 0);
+  MOZ_ASSERT((pos + size) <= 32);
+  slli(rt, rs, 32 - (pos + size));
+  if (sign_extend) {
+    srai(rt, rt, 32 - size);
+  } else {
+    srli(rt, rt, 32 - size);
+  }
+#endif
+}
+
+// Insert the low |size| bits of |source| into |dest| at constant bit
+// position |pos|, leaving the other bits of dest untouched.
+void MacroAssemblerRiscv64::InsertBits(Register dest, Register source, int pos,
+                                       int size) {
+#if JS_CODEGEN_RISCV64
+  MOZ_ASSERT(size < 64);
+#elif JS_CODEGEN_RISCV32
+  MOZ_ASSERT(size < 32);
+#endif
+  UseScratchRegisterScope temps(this);
+  Register mask = temps.Acquire();
+  BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+  Register source_ = temps.Acquire();
+  // Create a mask of the length=size.
+  ma_li(mask, Imm32(1));
+  slli(mask, mask, size);
+  addi(mask, mask, -1);
+  and_(source_, mask, source);
+  slli(source_, source_, pos);
+  // Make a mask containing 0's. 0's start at "pos" with length=size.
+  slli(mask, mask, pos);
+  not_(mask, mask);
+  // cut area for insertion of source.
+  and_(dest, mask, dest);
+  // insert source
+  or_(dest, dest, source_);
+}
+
+// Same as above, but the bit position is held in a register, so register
+// shifts (sll) are used in place of immediate shifts.
+void MacroAssemblerRiscv64::InsertBits(Register dest, Register source,
+                                       Register pos, int size) {
+#if JS_CODEGEN_RISCV64
+  MOZ_ASSERT(size < 64);
+#elif JS_CODEGEN_RISCV32
+  MOZ_ASSERT(size < 32);
+#endif
+  UseScratchRegisterScope temps(this);
+  Register mask = temps.Acquire();
+  BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+  Register source_ = temps.Acquire();
+  // Create a mask of the length=size.
+  ma_li(mask, Imm32(1));
+  slli(mask, mask, size);
+  addi(mask, mask, -1);
+  and_(source_, mask, source);
+  sll(source_, source_, pos);
+  // Make a mask containing 0's. 0's start at "pos" with length=size.
+  sll(mask, mask, pos);
+  not_(mask, mask);
+  // cut area for insertion of source.
+  and_(dest, mask, dest);
+  // insert source
+  or_(dest, dest, source_);
+}
+
+// 32-bit add with a flexible right operand. Immediates in [-2048, 2047] use
+// a single addiw; values up to twice that range are split into two addiw's
+// (each half fits 12 bits); anything larger is materialized via ma_li.
+void MacroAssemblerRiscv64::ma_add32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    if (is_int12(rt.immediate())) {
+      addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
+    } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+               (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+      addiw(rd, rs, rt.immediate() / 2);
+      addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
+    } else {
+      // li handles the relocation.
+      UseScratchRegisterScope temps(this);
+      Register scratch = temps.Acquire();
+      BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+      ma_li(scratch, rt.immediate());
+      addw(rd, rs, scratch);
+    }
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    addw(rd, rs, rt.rm());
+  }
+}
+
+// 64-bit add; same immediate-splitting strategy as ma_add32, using addi/add.
+void MacroAssemblerRiscv64::ma_add64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    if (is_int12(rt.immediate())) {
+      addi(rd, rs, static_cast<int32_t>(rt.immediate()));
+    } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+               (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+      addi(rd, rs, rt.immediate() / 2);
+      addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
+    } else {
+      // li handles the relocation.
+      UseScratchRegisterScope temps(this);
+      Register scratch = temps.Acquire();
+      BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+      ma_li(scratch, rt.immediate());
+      add(rd, rs, scratch);
+    }
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    add(rd, rs, rt.rm());
+  }
+}
+
+// 32-bit subtract: lowered to addition of the negated immediate where it
+// fits (RISC-V has no subi), otherwise subw on a materialized operand.
+void MacroAssemblerRiscv64::ma_sub32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    if (is_int12(-rt.immediate())) {
+      addiw(rd, rs,
+            static_cast<int32_t>(
+                -rt.immediate()));  // No subi instr, use addi(x, y, -imm).
+    } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+               (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+      addiw(rd, rs, -rt.immediate() / 2);
+      addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
+    } else {
+      // li handles the relocation.
+      UseScratchRegisterScope temps(this);
+      Register scratch = temps.Acquire();
+      ma_li(scratch, rt.immediate());
+      subw(rd, rs, scratch);
+    }
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    subw(rd, rs, rt.rm());
+  }
+}
+
+// 64-bit subtract; mirrors ma_sub32 with addi/sub.
+void MacroAssemblerRiscv64::ma_sub64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    if (is_int12(-rt.immediate())) {
+      addi(rd, rs,
+           static_cast<int32_t>(
+               -rt.immediate()));  // No subi instr, use addi(x, y, -imm).
+    } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+               (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+      addi(rd, rs, -rt.immediate() / 2);
+      addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
+    } else {
+      // li handles the relocation.
+      UseScratchRegisterScope temps(this);
+      Register scratch = temps.Acquire();
+      ma_li(scratch, rt.immediate());
+      sub(rd, rs, scratch);
+    }
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    sub(rd, rs, rt.rm());
+  }
+}
+
+// Bitwise AND with flexible right operand: andi for 12-bit immediates,
+// otherwise the immediate is materialized into a scratch register.
+void MacroAssemblerRiscv64::ma_and(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    if (is_int12(rt.immediate())) {
+      andi(rd, rs, rt.immediate());
+    } else {
+      UseScratchRegisterScope temps(this);
+      Register ScratchRegister = temps.Acquire();
+      ma_li(ScratchRegister, rt.immediate());
+      and_(rd, rs, ScratchRegister);
+    }
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    and_(rd, rs, rt.rm());
+  }
+}
+
+// Bitwise OR; same immediate strategy as ma_and.
+void MacroAssemblerRiscv64::ma_or(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    if (is_int12(rt.immediate())) {
+      ori(rd, rs, rt.immediate());
+    } else {
+      UseScratchRegisterScope temps(this);
+      Register ScratchRegister = temps.Acquire();
+      ma_li(ScratchRegister, rt.immediate());
+      or_(rd, rs, ScratchRegister);
+    }
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    or_(rd, rs, rt.rm());
+  }
+}
+
+// Bitwise XOR; same immediate strategy as ma_and.
+void MacroAssemblerRiscv64::ma_xor(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    if (is_int12(rt.immediate())) {
+      xori(rd, rs, rt.immediate());
+    } else {
+      UseScratchRegisterScope temps(this);
+      Register ScratchRegister = temps.Acquire();
+      ma_li(ScratchRegister, rt.immediate());
+      xor_(rd, rs, ScratchRegister);
+    }
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    xor_(rd, rs, rt.rm());
+  }
+}
+
+// Bitwise NOR. There is no 12-bit immediate form, so immediates are always
+// materialized first.
+void MacroAssemblerRiscv64::ma_nor(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    nor(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    nor(rd, rs, rt.rm());
+  }
+}
+
+// Division/remainder wrappers over the RV64 M-extension instructions.
+// Immediate right operands are always materialized into a scratch register
+// (there are no immediate div/rem encodings). RISC-V div/rem never trap;
+// callers are responsible for divide-by-zero / INT_MIN checks.
+void MacroAssemblerRiscv64::ma_div32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    divw(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    divw(rd, rs, rt.rm());
+  }
+}
+
+// Unsigned 32-bit division.
+void MacroAssemblerRiscv64::ma_divu32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    divuw(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    divuw(rd, rs, rt.rm());
+  }
+}
+
+// Signed 64-bit division.
+void MacroAssemblerRiscv64::ma_div64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    div(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    div(rd, rs, rt.rm());
+  }
+}
+
+// Unsigned 64-bit division.
+void MacroAssemblerRiscv64::ma_divu64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    divu(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    divu(rd, rs, rt.rm());
+  }
+}
+
+// Signed 32-bit remainder.
+void MacroAssemblerRiscv64::ma_mod32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    remw(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    remw(rd, rs, rt.rm());
+  }
+}
+
+// Unsigned 32-bit remainder.
+void MacroAssemblerRiscv64::ma_modu32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    remuw(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    remuw(rd, rs, rt.rm());
+  }
+}
+
+// Signed 64-bit remainder.
+void MacroAssemblerRiscv64::ma_mod64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    rem(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    rem(rd, rs, rt.rm());
+  }
+}
+
+// Unsigned 64-bit remainder.
+void MacroAssemblerRiscv64::ma_modu64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    remu(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    remu(rd, rs, rt.rm());
+  }
+}
+
+// 32-bit multiply (low word, sign-extended result via mulw).
+void MacroAssemblerRiscv64::ma_mul32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    mulw(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    mulw(rd, rs, rt.rm());
+  }
+}
+
+// High 32 bits of a 32x32 multiply: a full 64-bit mul followed by an
+// arithmetic shift. Assumes the operands hold properly sign-extended
+// 32-bit values, as is the invariant for int32 registers on riscv64.
+void MacroAssemblerRiscv64::ma_mulh32(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    mul(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    mul(rd, rs, rt.rm());
+  }
+  srai(rd, rd, 32);
+}
+
+// 64-bit multiply (low 64 bits of the product).
+void MacroAssemblerRiscv64::ma_mul64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    mul(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    mul(rd, rs, rt.rm());
+  }
+}
+
+// High 64 bits of the signed 128-bit product.
+void MacroAssemblerRiscv64::ma_mulh64(Register rd, Register rs, Operand rt) {
+  if (rt.is_imm()) {
+    UseScratchRegisterScope temps(this);
+    Register ScratchRegister = temps.Acquire();
+    ma_li(ScratchRegister, rt.immediate());
+    mulh(rd, rs, ScratchRegister);
+  } else {
+    MOZ_ASSERT(rt.is_reg());
+    mulh(rd, rs, rt.rm());
+  }
+}
+
+// Shift helpers: register-amount shifts use the R-type form, immediate
+// amounts use the shamt-immediate form. The immediate is truncated to
+// uint8_t; the encoder masks it to the legal shamt range.
+void MacroAssemblerRiscv64::ma_sll64(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    sll(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+    slli(rd, rs, shamt);
+  }
+}
+
+// 32-bit logical shift left (result sign-extended by slliw/sllw).
+void MacroAssemblerRiscv64::ma_sll32(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    sllw(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+    slliw(rd, rs, shamt);
+  }
+}
+
+// 64-bit arithmetic shift right.
+void MacroAssemblerRiscv64::ma_sra64(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    sra(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+    srai(rd, rs, shamt);
+  }
+}
+
+// 32-bit arithmetic shift right.
+void MacroAssemblerRiscv64::ma_sra32(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    sraw(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+    sraiw(rd, rs, shamt);
+  }
+}
+
+// 64-bit logical shift right.
+void MacroAssemblerRiscv64::ma_srl64(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    srl(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+    srli(rd, rs, shamt);
+  }
+}
+
+// 32-bit logical shift right.
+void MacroAssemblerRiscv64::ma_srl32(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    srlw(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+    srliw(rd, rs, shamt);
+  }
+}
+
+// rd = (rs < rt), signed. Uses slti for 12-bit immediates.
+void MacroAssemblerRiscv64::ma_slt(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    slt(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    if (is_int12(rt.immediate())) {
+      slti(rd, rs, static_cast<int32_t>(rt.immediate()));
+    } else {
+      // li handles the relocation.
+      UseScratchRegisterScope temps(this);
+      Register scratch = temps.Acquire();
+      BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+      ma_li(scratch, rt.immediate());
+      slt(rd, rs, scratch);
+    }
+  }
+}
+
+// rd = (rs < rt), unsigned.
+void MacroAssemblerRiscv64::ma_sltu(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    sltu(rd, rs, rt.rm());
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    if (is_int12(rt.immediate())) {
+      sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
+    } else {
+      // li handles the relocation.
+      UseScratchRegisterScope temps(this);
+      Register scratch = temps.Acquire();
+      BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+      ma_li(scratch, rt.immediate());
+      sltu(rd, rs, scratch);
+    }
+  }
+}
+
+// rd = (rs <= rt), signed, computed as !(rt < rs).
+void MacroAssemblerRiscv64::ma_sle(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    slt(rd, rt.rm(), rs);
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    // li handles the relocation.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+    ma_li(scratch, rt.immediate());
+    slt(rd, scratch, rs);
+  }
+  xori(rd, rd, 1);
+}
+
+// rd = (rs <= rt), unsigned, computed as !(rt < rs).
+void MacroAssemblerRiscv64::ma_sleu(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    sltu(rd, rt.rm(), rs);
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    // li handles the relocation.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+    ma_li(scratch, rt.immediate());
+    sltu(rd, scratch, rs);
+  }
+  xori(rd, rd, 1);
+}
+
+// rd = (rs > rt), signed, computed as (rt < rs).
+void MacroAssemblerRiscv64::ma_sgt(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    slt(rd, rt.rm(), rs);
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    // li handles the relocation.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+    ma_li(scratch, rt.immediate());
+    slt(rd, scratch, rs);
+  }
+}
+
+// rd = (rs > rt), unsigned, computed as (rt < rs).
+void MacroAssemblerRiscv64::ma_sgtu(Register rd, Register rs, Operand rt) {
+  if (rt.is_reg()) {
+    sltu(rd, rt.rm(), rs);
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    // li handles the relocation.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    BlockTrampolinePoolScope block_trampoline_pool(this, 9);
+    ma_li(scratch, rt.immediate());
+    sltu(rd, scratch, rs);
+  }
+}
+
+// rd = (rs >= rt), signed, computed as !(rs < rt).
+void MacroAssemblerRiscv64::ma_sge(Register rd, Register rs, Operand rt) {
+  ma_slt(rd, rs, rt);
+  xori(rd, rd, 1);
+}
+
+// rd = (rs >= rt), unsigned, computed as !(rs < rt).
+void MacroAssemblerRiscv64::ma_sgeu(Register rd, Register rs, Operand rt) {
+  ma_sltu(rd, rs, rt);
+  xori(rd, rd, 1);
+}
+
+// True when the operand is statically the zero register or the constant 0.
+static inline bool IsZero(const Operand& rt) {
+  if (rt.is_reg()) {
+    return rt.rm() == zero_reg;
+  } else {
+    MOZ_ASSERT(rt.is_imm());
+    return rt.immediate() == 0;
+  }
+}
+
+// rd = (rs == rt): zero-operand cases use seqz directly; the general case
+// subtracts and tests the difference for zero.
+void MacroAssemblerRiscv64::ma_seq(Register rd, Register rs, Operand rt) {
+  if (rs == zero_reg) {
+    ma_seqz(rd, rt);
+  } else if (IsZero(rt)) {
+    seqz(rd, rs);
+  } else {
+    ma_sub64(rd, rs, rt);
+    seqz(rd, rd);
+  }
+}
+
+// rd = (rs != rt); mirror of ma_seq using snez.
+void MacroAssemblerRiscv64::ma_sne(Register rd, Register rs, Operand rt) {
+  if (rs == zero_reg) {
+    ma_snez(rd, rt);
+  } else if (IsZero(rt)) {
+    snez(rd, rs);
+  } else {
+    ma_sub64(rd, rs, rt);
+    snez(rd, rd);
+  }
+}
+
+// rd = (rt == 0). For immediates the result is a compile-time constant.
+void MacroAssemblerRiscv64::ma_seqz(Register rd, const Operand& rt) {
+  if (rt.is_reg()) {
+    seqz(rd, rt.rm());
+  } else {
+    ma_li(rd, rt.immediate() == 0);
+  }
+}
+
+// rd = (rt != 0). For immediates the result is a compile-time constant.
+void MacroAssemblerRiscv64::ma_snez(Register rd, const Operand& rt) {
+  if (rt.is_reg()) {
+    snez(rd, rt.rm());
+  } else {
+    ma_li(rd, rt.immediate() != 0);
+  }
+}
+
+// rd = -rt; register operands only.
+void MacroAssemblerRiscv64::ma_neg(Register rd, const Operand& rt) {
+  MOZ_ASSERT(rt.is_reg());
+  neg(rd, rt.rm());
+}
+
+// Absolute jump to |dest|: materializes the address with a patchable li
+// sequence and jumps through a scratch register.
+void MacroAssemblerRiscv64::ma_jump(ImmPtr dest) {
+  DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  asMasm().ma_liPatchable(scratch, dest);
+  jr(scratch, 0);
+  DEBUG_PRINTF("]\n");
+}
+// Load a double immediate: move its bit pattern through a GPR (fmv_d_x).
+// +0.0 has an all-zero pattern, so it moves straight from the zero register.
+void MacroAssemblerRiscv64::ma_lid(FloatRegister dest, double value) {
+  ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+  if (imm.value != 0) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_li(scratch, imm);
+    fmv_d_x(dest, scratch);
+  } else {
+    fmv_d_x(dest, zero);
+  }
+}
+// Load a float immediate; same bit-pattern approach via fmv_w_x.
+void MacroAssemblerRiscv64::ma_lis(FloatRegister dest, float value) {
+  Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+  if (imm.value != 0) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_li(scratch, imm);
+    fmv_w_x(dest, scratch);
+  } else {
+    fmv_w_x(dest, zero);
+  }
+}
+
+// 32-bit subtract with overflow check: compute both the full 64-bit
+// difference (sub) and the sign-extended 32-bit difference (subw); they
+// differ exactly when the 32-bit result overflowed.
+void MacroAssemblerRiscv64::ma_sub32TestOverflow(Register rd, Register rj,
+                                                 Register rk, Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  sub(scratch, rj, rk);
+  subw(rd, rj, rk);
+  ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
+
+// Immediate variant: normally lowered to add of -imm, but INT32_MIN cannot
+// be negated in 32 bits, so that case materializes the immediate instead.
+void MacroAssemblerRiscv64::ma_sub32TestOverflow(Register rd, Register rj,
+                                                 Imm32 imm, Label* overflow) {
+  if (imm.value != INT32_MIN) {
+    asMasm().ma_add32TestOverflow(rd, rj, Imm32(-imm.value), overflow);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    MOZ_ASSERT(rj != scratch);
+    ma_li(scratch, Imm32(imm.value));
+    asMasm().ma_sub32TestOverflow(rd, rj, scratch, overflow);
+  }
+}
+
+// 32-bit add with overflow check; same 64-vs-32-bit comparison trick as
+// ma_sub32TestOverflow above.
+void MacroAssemblerRiscv64::ma_add32TestOverflow(Register rd, Register rj,
+                                                 Register rk, Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  add(scratch, rj, rk);
+  addw(rd, rj, rk);
+  ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerRiscv64::ma_add32TestOverflow(Register rd, Register rj,
+                                                 Imm32 imm, Label* overflow) {
+  // Check for signed range because of addi
+  if (is_intn(imm.value, 12)) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    addi(scratch, rj, imm.value);
+    addiw(rd, rj, imm.value);
+    ma_b(rd, scratch, overflow, Assembler::NotEqual);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch2 = temps.Acquire();
+    ma_li(scratch2, imm);
+    ma_add32TestOverflow(rd, rj, scratch2, overflow);
+  }
+}
+
+// 64-bit subtract with overflow check. Signed subtraction overflows iff the
+// operands have different signs AND the result's sign differs from the
+// minuend's; both conditions are combined via xor/and and tested with the
+// sign bit of scratch2.
+void MacroAssemblerRiscv64::ma_subPtrTestOverflow(Register rd, Register rj,
+                                                  Register rk,
+                                                  Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT_IF(rj == rd, rj != rk);
+  MOZ_ASSERT(rj != scratch2);
+  MOZ_ASSERT(rk != scratch2);
+  MOZ_ASSERT(rd != scratch2);
+
+  Register rj_copy = rj;
+
+  if (rj == rd) {
+    // rd is clobbered by the sub; keep the original rj for the sign checks.
+    ma_or(scratch2, rj, zero);
+    rj_copy = scratch2;
+  }
+
+  {
+    Register scratch = temps.Acquire();
+    MOZ_ASSERT(rd != scratch);
+
+    sub(rd, rj, rk);
+    // If the sign of rj and rk are the same, no overflow
+    ma_xor(scratch, rj_copy, rk);
+    // Check if the sign of rd and rj are the same
+    ma_xor(scratch2, rd, rj_copy);
+    ma_and(scratch2, scratch2, scratch);
+  }
+
+  ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
+
+// 64-bit add with overflow check. The rj == rk case doubles the value and
+// overflows iff the result's sign flips; the general case detects overflow
+// when the carry-in implied by (rd < rj) disagrees with rj's sign.
+void MacroAssemblerRiscv64::ma_addPtrTestOverflow(Register rd, Register rj,
+                                                  Register rk,
+                                                  Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  MOZ_ASSERT(rd != scratch);
+
+  if (rj == rk) {
+    if (rj == rd) {
+      ma_or(scratch, rj, zero);
+      rj = scratch;
+    }
+
+    add(rd, rj, rj);
+    ma_xor(scratch, rj, rd);
+    ma_b(scratch, zero, overflow, Assembler::LessThan);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch2 = temps.Acquire();
+    MOZ_ASSERT(rj != scratch);
+    MOZ_ASSERT(rd != scratch2);
+
+    if (rj == rd) {
+      ma_or(scratch2, rj, zero);
+      rj = scratch2;
+    }
+
+    add(rd, rj, rk);
+    slti(scratch, rj, 0);
+    slt(scratch2, rd, rj);
+    ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+  }
+}
+
+// 64-bit add-immediate with overflow check. With a non-zero immediate,
+// signed overflow is detected by direction: adding a positive value must
+// not produce a result below rj, and adding a negative one must not
+// produce a result above rj. imm == 0 is a plain move (cannot overflow).
+void MacroAssemblerRiscv64::ma_addPtrTestOverflow(Register rd, Register rj,
+                                                  Imm32 imm, Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+
+  if (imm.value == 0) {
+    ori(rd, rj, 0);
+    return;
+  }
+
+  if (rj == rd) {
+    // Preserve the original rj for the post-add comparison.
+    ori(scratch2, rj, 0);
+    rj = scratch2;
+  }
+
+  ma_add64(rd, rj, imm);
+
+  if (imm.value > 0) {
+    ma_b(rd, rj, overflow, Assembler::LessThan);
+  } else {
+    MOZ_ASSERT(imm.value < 0);
+    ma_b(rd, rj, overflow, Assembler::GreaterThan);
+  }
+}
+
+// ImmWord variant of the above; the immediate is materialized into rd
+// before the add, so an rj alias must be copied aside first.
+void MacroAssemblerRiscv64::ma_addPtrTestOverflow(Register rd, Register rj,
+                                                  ImmWord imm,
+                                                  Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+
+  if (imm.value == 0) {
+    ori(rd, rj, 0);
+    return;
+  }
+
+  if (rj == rd) {
+    MOZ_ASSERT(rj != scratch2);
+    ori(scratch2, rj, 0);
+    rj = scratch2;
+  }
+
+  ma_li(rd, imm);
+  add(rd, rj, rd);
+
+  if (imm.value > 0) {
+    ma_b(rd, rj, overflow, Assembler::LessThan);
+  } else {
+    MOZ_ASSERT(imm.value < 0);
+    ma_b(rd, rj, overflow, Assembler::GreaterThan);
+  }
+}
+
+// 32-bit add with unsigned-carry test: after the add, an unsigned wrap
+// occurred iff the result is below one of the addends (sltu against
+// whichever input was not overwritten by rd).
+void MacroAssemblerRiscv64::ma_add32TestCarry(Condition cond, Register rd,
+                                              Register rj, Register rk,
+                                              Label* overflow) {
+  MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
+  MOZ_ASSERT_IF(rd == rj, rk != rd);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  addw(rd, rj, rk);
+  sltu(scratch, rd, rd == rj ? rk : rj);
+  ma_b(Register(scratch), Register(scratch), overflow,
+       cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+// Immediate variant: materialize and delegate to the register form.
+void MacroAssemblerRiscv64::ma_add32TestCarry(Condition cond, Register rd,
+                                              Register rj, Imm32 imm,
+                                              Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  MOZ_ASSERT(rj != scratch2);
+  ma_li(scratch2, imm);
+  ma_add32TestCarry(cond, rd, rj, scratch2, overflow);
+}
+
+void MacroAssemblerRiscv64::ma_subPtrTestOverflow(Register rd, Register rj,
+                                                  Imm32 imm, Label* overflow) {
+  // TODO(riscv64): Check subPtrTestOverflow (note inherited from the
+  // loong64 port this code derives from).
+  MOZ_ASSERT(imm.value != INT32_MIN);
+  ma_addPtrTestOverflow(rd, rj, Imm32(-imm.value), overflow);
+}
+
+// Compute rd = rj + rk (64-bit) and branch to |label| on unsigned
+// carry-out (CarrySet) or its absence (CarryClear).
+void MacroAssemblerRiscv64::ma_addPtrTestCarry(Condition cond, Register rd,
+                                               Register rj, Register rk,
+                                               Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  // rk is compared against after the add, so rd must not clobber it.
+  MOZ_ASSERT(rd != rk);
+  MOZ_ASSERT(rd != scratch);
+  add(rd, rj, rk);
+  // Carry occurred iff the unsigned sum wrapped below an operand.
+  sltu(scratch, rd, rk);
+  ma_b(scratch, Register(scratch), label,
+       cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+// Imm32 variant of the carry-testing add.  Small immediates use a single
+// addi/sltiu pair; larger ones are materialized into a scratch register.
+void MacroAssemblerRiscv64::ma_addPtrTestCarry(Condition cond, Register rd,
+                                               Register rj, Imm32 imm,
+                                               Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+
+  // Check for signed range because of addi
+  if (is_intn(imm.value, 12)) {
+    addi(rd, rj, imm.value);
+    // Carry iff the unsigned sum is below the (sign-extended) addend.
+    sltiu(scratch2, rd, imm.value);
+    ma_b(scratch2, scratch2, label,
+         cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+  } else {
+    ma_li(scratch2, imm);
+    ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+  }
+}
+
+// ImmWord variant of the carry-testing add.
+void MacroAssemblerRiscv64::ma_addPtrTestCarry(Condition cond, Register rd,
+                                               Register rj, ImmWord imm,
+                                               Label* label) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+
+  // Check for signed range because of addi
+  if (is_intn(imm.value, 12)) {
+    // NOTE(review): imm.value is a 64-bit word truncated to uint32_t here;
+    // for a value whose 64-bit pattern is negative-in-int12 the Operand
+    // built below carries the 32-bit zero-extended pattern — confirm this
+    // matches the sign-extended addi operand in ma_sltu's comparison.
+    uint32_t value = imm.value;
+    addi(rd, rj, value);
+    ma_sltu(scratch2, rd, Operand(value));
+    ma_b(scratch2, scratch2, label,
+         cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+  } else {
+    ma_li(scratch2, imm);
+    ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+  }
+}
+
+// Load from a base+scaled-index address: fold base and scaled index into a
+// scratch register, then defer to the Address-based ma_load.
+void MacroAssemblerRiscv64::ma_load(Register dest, const BaseIndex& src,
+                                    LoadStoreSize size,
+                                    LoadStoreExtension extension) {
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  asMasm().computeScaledAddress(src, scratch2);
+  asMasm().ma_load(dest, Address(scratch2, src.offset), size, extension);
+}
+// Pop a double from the stack into |f| (load, then bump sp up by 8).
+void MacroAssemblerRiscv64::ma_pop(FloatRegister f) {
+  fld(f, StackPointer, 0);
+  addi(StackPointer, StackPointer, sizeof(double));
+}
+
+// Push a double: bump sp down by 8, then store |f| at the new top.
+void MacroAssemblerRiscv64::ma_push(FloatRegister f) {
+  addi(StackPointer, StackPointer, (int32_t) - sizeof(double));
+  fsd(f, StackPointer, 0);
+}
+
+// Float load/store helpers for Address operands.  Each uses the short-form
+// instruction when the offset fits the signed 12-bit displacement field,
+// and otherwise materializes the full offset into a scratch register.
+
+// Load a single-precision float from |address| into |ft|.
+void MacroAssemblerRiscv64::ma_fld_s(FloatRegister ft, Address address) {
+  int32_t offset = address.offset;
+  Register base = address.base;
+
+  if (is_intn(offset, 12)) {
+    flw(ft, base, offset);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    MOZ_ASSERT(base != scratch);
+    ma_li(scratch, Imm32(offset));
+    ma_add64(scratch, base, scratch);
+    flw(ft, scratch, 0);
+  }
+}
+// Load a double-precision float from |address| into |ft|.
+void MacroAssemblerRiscv64::ma_fld_d(FloatRegister ft, Address address) {
+  int32_t offset = address.offset;
+  Register base = address.base;
+
+  if (is_intn(offset, 12)) {
+    fld(ft, base, offset);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    MOZ_ASSERT(base != scratch);
+    ma_li(scratch, Imm32(offset));
+    ma_add64(scratch, base, scratch);
+    fld(ft, scratch, 0);
+  }
+}
+// Store the double in |ft| to |address|.
+void MacroAssemblerRiscv64::ma_fst_d(FloatRegister ft, Address address) {
+  int32_t offset = address.offset;
+  Register base = address.base;
+
+  if (is_intn(offset, 12)) {
+    fsd(ft, base, offset);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    MOZ_ASSERT(base != scratch);
+    ma_li(scratch, Imm32(offset));
+    ma_add64(scratch, base, scratch);
+    fsd(ft, scratch, 0);
+  }
+}
+// Store the single-precision float in |ft| to |address|.
+void MacroAssemblerRiscv64::ma_fst_s(FloatRegister ft, Address address) {
+  int32_t offset = address.offset;
+  Register base = address.base;
+
+  if (is_intn(offset, 12)) {
+    fsw(ft, base, offset);
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    MOZ_ASSERT(base != scratch);
+    ma_li(scratch, Imm32(offset));
+    ma_add64(scratch, base, scratch);
+    fsw(ft, scratch, 0);
+  }
+}
+
+// BaseIndex variants of the float load/store helpers: compute the scaled
+// base+index address into a scratch register, then defer to the Address
+// overloads above (which also absorb the remaining displacement).
+
+void MacroAssemblerRiscv64::ma_fst_d(FloatRegister ft, BaseIndex address) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  asMasm().computeScaledAddress(address, scratch);
+  asMasm().ma_fst_d(ft, Address(scratch, address.offset));
+}
+
+void MacroAssemblerRiscv64::ma_fst_s(FloatRegister ft, BaseIndex address) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  asMasm().computeScaledAddress(address, scratch);
+  asMasm().ma_fst_s(ft, Address(scratch, address.offset));
+}
+
+void MacroAssemblerRiscv64::ma_fld_d(FloatRegister ft, const BaseIndex& src) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  asMasm().computeScaledAddress(src, scratch);
+  asMasm().ma_fld_d(ft, Address(scratch, src.offset));
+}
+
+void MacroAssemblerRiscv64::ma_fld_s(FloatRegister ft, const BaseIndex& src) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  asMasm().computeScaledAddress(src, scratch);
+  asMasm().ma_fld_s(ft, Address(scratch, src.offset));
+}
+
+// Call an absolute address: load |dest| into CallReg with a patchable
+// immediate sequence and jump-and-link through it.  The trampoline pool is
+// blocked so the patchable sequence stays contiguous, and CallReg is
+// excluded from the scratch set so ma_liPatchable cannot clobber it.
+void MacroAssemblerRiscv64::ma_call(ImmPtr dest) {
+  DEBUG_PRINTF("[ %s\n", __FUNCTION__);
+  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+  UseScratchRegisterScope temps(this);
+  temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
+  asMasm().ma_liPatchable(CallReg, dest);
+  jalr(CallReg, 0);
+  DEBUG_PRINTF("]\n");
+}
+
+// NaN tests: feq against itself yields 0 exactly when the operand is NaN,
+// so rd ends up 1 iff neither operand is NaN (and the IsNan variants simply
+// invert that bit).
+
+void MacroAssemblerRiscv64::CompareIsNotNanF32(Register rd, FPURegister cmp1,
+                                               FPURegister cmp2) {
+  UseScratchRegisterScope temps(this);
+  BlockTrampolinePoolScope block_trampoline_pool(this, 3);
+  Register scratch = temps.Acquire();
+
+  feq_s(rd, cmp1, cmp1);       // rd <- !isNan(cmp1)
+  feq_s(scratch, cmp2, cmp2);  // scratch <- !isNaN(cmp2)
+  ma_and(rd, rd, scratch);     // rd <- !isNan(cmp1) && !isNan(cmp2)
+}
+
+void MacroAssemblerRiscv64::CompareIsNotNanF64(Register rd, FPURegister cmp1,
+                                               FPURegister cmp2) {
+  UseScratchRegisterScope temps(this);
+  BlockTrampolinePoolScope block_trampoline_pool(this, 3);
+  Register scratch = temps.Acquire();
+
+  feq_d(rd, cmp1, cmp1);       // rd <- !isNan(cmp1)
+  feq_d(scratch, cmp2, cmp2);  // scratch <- !isNaN(cmp2)
+  ma_and(rd, rd, scratch);     // rd <- !isNan(cmp1) && !isNan(cmp2)
+}
+
+void MacroAssemblerRiscv64::CompareIsNanF32(Register rd, FPURegister cmp1,
+                                            FPURegister cmp2) {
+  CompareIsNotNanF32(rd, cmp1, cmp2);  // rd <- !isNan(cmp1) && !isNan(cmp2)
+  ma_xor(rd, rd, Operand(1));          // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void MacroAssemblerRiscv64::CompareIsNanF64(Register rd, FPURegister cmp1,
+                                            FPURegister cmp2) {
+  CompareIsNotNanF64(rd, cmp1, cmp2);  // rd <- !isNan(cmp1) && !isNan(cmp2)
+  ma_xor(rd, rd, Operand(1));          // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+// Count leading zeros of the 32-bit value in |xx| into |rd|, using a
+// branchy binary-search narrowing (no Zbb extension assumed).  rd may alias
+// xx; the two scratch registers must be distinct from xx.
+void MacroAssemblerRiscv64::Clz32(Register rd, Register xx) {
+  // 32 bit unsigned in lower word: count number of leading zeros.
+  // int n = 32;
+  // unsigned y;
+
+  // y = x >>16; if (y != 0) { n = n -16; x = y; }
+  // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
+  // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
+  // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
+  // y = x >> 1; if (y != 0) {rd = n - 2; return;}
+  // rd = n - x;
+
+  Label L0, L1, L2, L3, L4;
+  UseScratchRegisterScope temps(this);
+  Register x = rd;
+  Register y = temps.Acquire();
+  Register n = temps.Acquire();
+  MOZ_ASSERT(xx != y && xx != n);
+  mv(x, xx);
+  ma_li(n, Imm32(32));
+#if JS_CODEGEN_RISCV64
+  srliw(y, x, 16);
+  ma_branch(&L0, Equal, y, Operand(zero_reg));
+  mv(x, y);
+  addiw(n, n, -16);
+  bind(&L0);
+  srliw(y, x, 8);
+  ma_branch(&L1, Equal, y, Operand(zero_reg));
+  addiw(n, n, -8);
+  mv(x, y);
+  bind(&L1);
+  srliw(y, x, 4);
+  ma_branch(&L2, Equal, y, Operand(zero_reg));
+  addiw(n, n, -4);
+  mv(x, y);
+  bind(&L2);
+  srliw(y, x, 2);
+  ma_branch(&L3, Equal, y, Operand(zero_reg));
+  addiw(n, n, -2);
+  mv(x, y);
+  bind(&L3);
+  // Final step: x is now 1..3; rd = n - x, unless x had its bit 1 set,
+  // in which case the answer is n - 2.
+  srliw(y, x, 1);
+  subw(rd, n, x);
+  ma_branch(&L4, Equal, y, Operand(zero_reg));
+  addiw(rd, n, -2);
+  bind(&L4);
+#elif JS_CODEGEN_RISCV32
+  srli(y, x, 16);
+  ma_branch(&L0, Equal, y, Operand(zero_reg));
+  mv(x, y);
+  addi(n, n, -16);
+  bind(&L0);
+  srli(y, x, 8);
+  ma_branch(&L1, Equal, y, Operand(zero_reg));
+  addi(n, n, -8);
+  mv(x, y);
+  bind(&L1);
+  srli(y, x, 4);
+  ma_branch(&L2, Equal, y, Operand(zero_reg));
+  addi(n, n, -4);
+  mv(x, y);
+  bind(&L2);
+  srli(y, x, 2);
+  ma_branch(&L3, Equal, y, Operand(zero_reg));
+  addi(n, n, -2);
+  mv(x, y);
+  bind(&L3);
+  srli(y, x, 1);
+  sub(rd, n, x);
+  ma_branch(&L4, Equal, y, Operand(zero_reg));
+  addi(rd, n, -2);
+  bind(&L4);
+#endif
+}
+
+#if JS_CODEGEN_RISCV64
+// Count leading zeros of the 64-bit value in |xx| into |rd|; same binary
+// narrowing scheme as Clz32 with one extra 32-bit step.  rd may alias xx.
+void MacroAssemblerRiscv64::Clz64(Register rd, Register xx) {
+  // 64 bit: count number of leading zeros.
+  // int n = 64;
+  // unsigned y;
+
+  // y = x >>32; if (y != 0) { n = n - 32; x = y; }
+  // y = x >>16; if (y != 0) { n = n - 16; x = y; }
+  // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
+  // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
+  // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
+  // y = x >> 1; if (y != 0) {rd = n - 2; return;}
+  // rd = n - x;
+
+  Label L0, L1, L2, L3, L4, L5;
+  UseScratchRegisterScope temps(this);
+  Register x = rd;
+  Register y = temps.Acquire();
+  Register n = temps.Acquire();
+  MOZ_ASSERT(xx != y && xx != n);
+  mv(x, xx);
+  ma_li(n, Imm32(64));
+  srli(y, x, 32);
+  ma_branch(&L0, Equal, y, Operand(zero_reg));
+  addiw(n, n, -32);
+  mv(x, y);
+  bind(&L0);
+  srli(y, x, 16);
+  ma_branch(&L1, Equal, y, Operand(zero_reg));
+  addiw(n, n, -16);
+  mv(x, y);
+  bind(&L1);
+  srli(y, x, 8);
+  ma_branch(&L2, Equal, y, Operand(zero_reg));
+  addiw(n, n, -8);
+  mv(x, y);
+  bind(&L2);
+  srli(y, x, 4);
+  ma_branch(&L3, Equal, y, Operand(zero_reg));
+  addiw(n, n, -4);
+  mv(x, y);
+  bind(&L3);
+  srli(y, x, 2);
+  ma_branch(&L4, Equal, y, Operand(zero_reg));
+  addiw(n, n, -2);
+  mv(x, y);
+  bind(&L4);
+  srli(y, x, 1);
+  subw(rd, n, x);
+  ma_branch(&L5, Equal, y, Operand(zero_reg));
+  addiw(rd, n, -2);
+  bind(&L5);
+}
+#endif
+// Count trailing zeros of the 32-bit value in |rs| into |rd|:
+// (rs ^ (rs - 1)) & (rs - 1) isolates the trailing-zero run as trailing
+// ones, whose population equals 32 - clz of that mask.
+void MacroAssemblerRiscv64::Ctz32(Register rd, Register rs) {
+  // Convert trailing zeroes to trailing ones, and bits to their left
+  // to zeroes.
+
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_add64(scratch, rs, Operand(-1));
+    ma_xor(rd, scratch, rs);
+    ma_and(rd, rd, scratch);
+    // Count number of leading zeroes.
+  }
+  Clz32(rd, rd);
+  {
+    // Subtract number of leading zeroes from 32 to get number of trailing
+    // ones. Remember that the trailing ones were formerly trailing zeroes.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_li(scratch, Imm32(32));
+    ma_sub32(rd, scratch, rd);
+  }
+}
+#if JS_CODEGEN_RISCV64
+// 64-bit analogue of Ctz32: isolate the trailing-zero run as trailing
+// ones, then compute 64 - clz64 of the mask.
+void MacroAssemblerRiscv64::Ctz64(Register rd, Register rs) {
+  // Convert trailing zeroes to trailing ones, and bits to their left
+  // to zeroes.
+  {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_add64(scratch, rs, Operand(-1));
+    ma_xor(rd, scratch, rs);
+    ma_and(rd, rd, scratch);
+    // Count number of leading zeroes.
+  }
+  Clz64(rd, rd);
+  {
+    // Subtract number of leading zeroes from 64 to get number of trailing
+    // ones. Remember that the trailing ones were formerly trailing zeroes.
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    ma_li(scratch, 64);
+    ma_sub64(rd, scratch, rd);
+  }
+}
+#endif
+// Population count of the 32-bit value in |rs| into |rd|, using the
+// classic parallel bit-counting scheme (no Zbb cpop assumed).  The mask
+// constants are built at runtime from smaller seeds to keep the
+// materialization short.  |scratch| is caller-provided; two more scratch
+// registers are acquired internally.
+void MacroAssemblerRiscv64::Popcnt32(Register rd, Register rs,
+                                     Register scratch) {
+  MOZ_ASSERT(scratch != rs);
+  MOZ_ASSERT(scratch != rd);
+  // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+  //
+  // A generalization of the best bit counting method to integers of
+  // bit-widths up to 128 (parameterized by type T) is this:
+  //
+  // v = v - ((v >> 1) & (T)~(T)0/3);                           // temp
+  // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);      // temp
+  // v = (v + (v >> 4)) & (T)~(T)0/255*15;                      // temp
+  // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+  //
+  // There are algorithms which are faster in the cases where very few
+  // bits are set but the algorithm here attempts to minimize the total
+  // number of instructions executed even when a large number of bits
+  // are set.
+  // The number of instruction is 20.
+  // uint32_t B0 = 0x55555555;     // (T)~(T)0/3
+  // uint32_t B1 = 0x33333333;     // (T)~(T)0/15*3
+  // uint32_t B2 = 0x0F0F0F0F;     // (T)~(T)0/255*15
+  // uint32_t value = 0x01010101;  // (T)~(T)0/255
+
+  uint32_t shift = 24;  // (sizeof(uint32_t) - 1) * BITS_PER_BYTE
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register value = temps.Acquire();
+  MOZ_ASSERT((rd != value) && (rs != value));
+  ma_li(value, 0x01010101);     // value = 0x01010101;
+  ma_li(scratch2, 0x55555555);  // B0 = 0x55555555;
+  ma_srl32(scratch, rs, Operand(1));
+  ma_and(scratch, scratch, scratch2);
+  ma_sub32(scratch, rs, scratch);  // v - ((v >> 1) & B0)
+  ma_li(scratch2, 0x33333333);     // B1 = 0x33333333;
+  slli(rd, scratch2, 4);
+  or_(scratch2, scratch2, rd);  // widen B1 to a 64-bit-safe mask
+  ma_and(rd, scratch, scratch2);
+  ma_srl32(scratch, scratch, Operand(2));
+  ma_and(scratch, scratch, scratch2);
+  ma_add32(scratch, rd, scratch);  // per-2-bit sums folded to 4-bit sums
+  ma_srl32(rd, scratch, Operand(4));
+  ma_add32(rd, rd, scratch);
+  ma_li(scratch2, 0xF);
+  ma_mul32(scratch2, value, scratch2);  // B2 = 0x0F0F0F0F;
+  ma_and(rd, rd, scratch2);
+  ma_mul32(rd, rd, value);  // sum the four byte counts into the top byte
+  ma_srl32(rd, rd, Operand(shift));
+}
+
+#if JS_CODEGEN_RISCV64
+// 64-bit population count; same parallel-counting scheme as Popcnt32 with
+// 64-bit masks.  Note the final shift is 32 + 24 = 56 bits, i.e.
+// (sizeof(uint64_t) - 1) * BITS_PER_BYTE, extracting the top byte.
+void MacroAssemblerRiscv64::Popcnt64(Register rd, Register rs,
+                                     Register scratch) {
+  MOZ_ASSERT(scratch != rs);
+  MOZ_ASSERT(scratch != rd);
+  // uint64_t B0 = 0x5555555555555555l;     // (T)~(T)0/3
+  // uint64_t B1 = 0x3333333333333333l;     // (T)~(T)0/15*3
+  // uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl;     // (T)~(T)0/255*15
+  // uint64_t value = 0x0101010101010101l;  // (T)~(T)0/255
+  uint64_t shift = 24;
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  Register value = temps.Acquire();
+  MOZ_ASSERT((rd != value) && (rs != value));
+  // Build the large masks by multiplying a repeated-0x11 seed — cheaper to
+  // materialize than each full 64-bit constant.
+  ma_li(value, 0x1111111111111111l);  // value = 0x1111111111111111l;
+  ma_li(scratch2, 5);
+  ma_mul64(scratch2, value, scratch2);  // B0 = 0x5555555555555555l;
+  ma_srl64(scratch, rs, Operand(1));
+  ma_and(scratch, scratch, scratch2);
+  ma_sub64(scratch, rs, scratch);  // v - ((v >> 1) & B0)
+  ma_li(scratch2, 3);
+  ma_mul64(scratch2, value, scratch2);  // B1 = 0x3333333333333333l;
+  ma_and(rd, scratch, scratch2);
+  ma_srl64(scratch, scratch, Operand(2));
+  ma_and(scratch, scratch, scratch2);
+  ma_add64(scratch, rd, scratch);
+  ma_srl64(rd, scratch, Operand(4));
+  ma_add64(rd, rd, scratch);
+  ma_li(scratch2, 0xF);
+  ma_li(value, 0x0101010101010101l);    // value = 0x0101010101010101l;
+  ma_mul64(scratch2, value, scratch2);  // B2 = 0x0F0F0F0F0F0F0F0Fl;
+  ma_and(rd, rd, scratch2);
+  ma_mul64(rd, rd, value);  // byte counts accumulate into the top byte
+  srli(rd, rd, 32 + shift);
+}
+#endif
+
+// Signed 32-bit division rd = rj / rk that branches to |overflow| when the
+// division is inexact (remainder != 0).  Despite the name, the guard here
+// is on the remainder, not on INT_MIN / -1 — callers use this where only
+// exact quotients are acceptable.
+void MacroAssemblerRiscv64::ma_div_branch_overflow(Register rd, Register rj,
+                                                   Register rk,
+                                                   Label* overflow) {
+  ScratchRegisterScope scratch(asMasm());
+  ma_mod32(scratch, rj, rk);
+  ma_b(scratch, scratch, overflow, Assembler::NonZero);
+  divw(rd, rj, rk);
+}
+
+// Immediate-divisor variant: materialize |imm| and defer to the register
+// form above.
+void MacroAssemblerRiscv64::ma_div_branch_overflow(Register rd, Register rj,
+                                                   Imm32 imm, Label* overflow) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  ma_li(scratch, imm);
+  ma_div_branch_overflow(rd, rj, scratch, overflow);
+}
+
+// Compute dest = src % ((1 << shift) - 1) without a divide, by summing the
+// shift-wide digit groups of |src| (digit-sum trick; see MATH below).
+// |hold| records the sign of src (+1/-1), |remain| holds unprocessed bits.
+// If |negZero| is non-null, branch there when the result would be -0.
+void MacroAssemblerRiscv64::ma_mod_mask(Register src, Register dest,
+                                        Register hold, Register remain,
+                                        int32_t shift, Label* negZero) {
+  // MATH:
+  // We wish to compute x % (1<<y) - 1 for a known constant, y.
+  // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
+  // dividend as a number in base b, namely
+  // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+  // now, since both addition and multiplication commute with modulus,
+  // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+  // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+  // now, since b == C + 1, b % C == 1, and b^n % C == 1
+  // this means that the whole thing simplifies to:
+  // c_0 + c_1 + c_2 ... c_n % C
+  // each c_n can easily be computed by a shift/bitextract, and the modulus
+  // can be maintained by simply subtracting by C whenever the number gets
+  // over C.
+  int32_t mask = (1 << shift) - 1;
+  Label head, negative, sumSigned, done;
+
+  // hold holds -1 if the value was negative, 1 otherwise.
+  // remain holds the remaining bits that have not been processed
+  // SecondScratchReg serves as a temporary location to store extracted bits
+  // into as well as holding the trial subtraction as a temp value dest is
+  // the accumulator (and holds the final result)
+
+  // move the whole value into the remain.
+  or_(remain, src, zero);
+  // Zero out the dest.
+  ma_li(dest, Imm32(0));
+  // Set the hold appropriately.
+  ma_b(remain, remain, &negative, Signed, ShortJump);
+  ma_li(hold, Imm32(1));
+  ma_branch(&head, ShortJump);
+
+  bind(&negative);
+  ma_li(hold, Imm32(-1));
+  // Work on the absolute value so digit extraction is well-defined.
+  subw(remain, zero, remain);
+
+  // Begin the main loop.
+  bind(&head);
+
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.Acquire();
+  // Extract the bottom bits into SecondScratchReg.
+  ma_and(scratch2, remain, Imm32(mask));
+  // Add those bits to the accumulator.
+  addw(dest, dest, scratch2);
+  // Do a trial subtraction
+  ma_sub32(scratch2, dest, Imm32(mask));
+  // If (sum - C) > 0, store sum - C back into sum, thus performing a
+  // modulus.
+  ma_b(scratch2, Register(scratch2), &sumSigned, Signed, ShortJump);
+  or_(dest, scratch2, zero);
+  bind(&sumSigned);
+  // Get rid of the bits that we extracted before.
+  srliw(remain, remain, shift);
+  // If the shift produced zero, finish, otherwise, continue in the loop.
+  ma_b(remain, remain, &head, NonZero, ShortJump);
+  // Check the hold to see if we need to negate the result.
+  ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+  // If the hold was negative, negate the result to be in line with
+  // what JS wants
+  if (negZero != nullptr) {
+    // Jump out in case of negative zero.
+    // NOTE(review): this tests |hold| for zero, but hold is only ever
+    // +1/-1 here; the negative-zero case would seem to require testing
+    // |dest| — confirm against the mips/loong64 originals.
+    ma_b(hold, hold, negZero, Zero);
+    subw(dest, zero, dest);
+  } else {
+    subw(dest, zero, dest);
+  }
+
+  bind(&done);
+}
+
+// Conditional float move: fd <- fj iff rk == 0 (otherwise fd is left
+// unchanged).  |fmt| selects single- or double-precision move.
+void MacroAssemblerRiscv64::ma_fmovz(FloatFormat fmt, FloatRegister fd,
+                                     FloatRegister fj, Register rk) {
+  Label done;
+  ma_b(rk, zero, &done, Assembler::NotEqual);
+  if (fmt == SingleFloat) {
+    fmv_s(fd, fj);
+  } else {
+    fmv_d(fd, fj);
+  }
+  bind(&done);
+}
+
+// Reverse the byte order of the 4- or 8-byte value in |rs| into |rd| using
+// shift/mask swaps (no Zbb rev8 assumed).  |scratch| is caller-provided;
+// two more scratch registers are acquired internally (hence the t6
+// aliasing asserts).
+void MacroAssemblerRiscv64::ByteSwap(Register rd, Register rs, int operand_size,
+                                     Register scratch) {
+  MOZ_ASSERT(scratch != rs);
+  MOZ_ASSERT(scratch != rd);
+  MOZ_ASSERT(operand_size == 4 || operand_size == 8);
+  if (operand_size == 4) {
+    // uint32_t x1 = 0x00FF00FF;
+    // x0 = (x0 << 16 | x0 >> 16);
+    // x0 = (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8));
+    UseScratchRegisterScope temps(this);
+    BlockTrampolinePoolScope block_trampoline_pool(this, 17);
+    MOZ_ASSERT((rd != t6) && (rs != t6));
+    Register x0 = temps.Acquire();
+    Register x1 = temps.Acquire();
+    Register x2 = scratch;
+    RV_li(x1, 0x00FF00FF);
+    slliw(x0, rs, 16);
+    srliw(rd, rs, 16);
+    or_(x0, rd, x0);   // x0 <- x0 << 16 | x0 >> 16
+    and_(x2, x0, x1);  // x2 <- x0 & 0x00FF00FF
+    slliw(x2, x2, 8);  // x2 <- (x0 & x1) << 8
+    slliw(x1, x1, 8);  // x1 <- 0xFF00FF00
+    and_(rd, x0, x1);  // x0 & 0xFF00FF00
+    srliw(rd, rd, 8);
+    or_(rd, rd, x2);  // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
+  } else {
+    // uint64_t x1 = 0x0000FFFF0000FFFFl;
+    // uint64_t x2 = 0x00FF00FF00FF00FFl;
+    // x0 = (x0 << 32 | x0 >> 32);
+    // x0 = (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
+    // x0 = (x0 & x2) << 8 | (x0 & (x2 << 8)) >> 8;
+    UseScratchRegisterScope temps(this);
+    BlockTrampolinePoolScope block_trampoline_pool(this, 30);
+    MOZ_ASSERT((rd != t6) && (rs != t6));
+    Register x0 = temps.Acquire();
+    Register x1 = temps.Acquire();
+    Register x2 = scratch;
+    RV_li(x1, 0x0000FFFF0000FFFFl);
+    slli(x0, rs, 32);
+    srli(rd, rs, 32);
+    or_(x0, rd, x0);    // x0 <- x0 << 32 | x0 >> 32
+    and_(x2, x0, x1);   // x2 <- x0 & 0x0000FFFF0000FFFF
+    slli(x2, x2, 16);   // x2 <- (x0 & 0x0000FFFF0000FFFF) << 16
+    slli(x1, x1, 16);   // x1 <- 0xFFFF0000FFFF0000
+    and_(rd, x0, x1);   // rd <- x0 & 0xFFFF0000FFFF0000
+    srli(rd, rd, 16);   // rd <- x0 & (x1 << 16)) >> 16
+    or_(x0, rd, x2);    // (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
+    RV_li(x1, 0x00FF00FF00FF00FFl);
+    and_(x2, x0, x1);  // x2 <- x0 & 0x00FF00FF00FF00FF
+    slli(x2, x2, 8);   // x2 <- (x0 & x1) << 8
+    slli(x1, x1, 8);   // x1 <- 0xFF00FF00FF00FF00
+    and_(rd, x0, x1);
+    srli(rd, rd, 8);  // rd <- (x0 & (x1 << 8)) >> 8
+    or_(rd, rd, x2);  // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
+  }
+}
+
+// Shared min/max implementation for float and double that enforces JS NaN
+// semantics: if either operand is NaN the result is NaN, otherwise defer
+// to the hardware fmin/fmax.
+template <typename F_TYPE>
+void MacroAssemblerRiscv64::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
+                                              FPURegister src2,
+                                              MaxMinKind kind) {
+  MOZ_ASSERT((std::is_same<F_TYPE, float>::value) ||
+             (std::is_same<F_TYPE, double>::value));
+
+  // min/max of a value with itself is itself; a plain move suffices.
+  if (src1 == src2 && dst != src1) {
+    if (std::is_same<float, F_TYPE>::value) {
+      fmv_s(dst, src1);
+    } else {
+      fmv_d(dst, src1);
+    }
+    return;
+  }
+
+  Label done, nan;
+
+  // For RISCV, fmin_s returns the other non-NaN operand as result if only one
+  // operand is NaN; but for JS, if any operand is NaN, result is NaN. The
+  // following handles the discrepancy between handling of NaN between ISA and
+  // JS semantics
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  if (std::is_same<float, F_TYPE>::value) {
+    CompareIsNotNanF32(scratch, src1, src2);
+  } else {
+    CompareIsNotNanF64(scratch, src1, src2);
+  }
+  BranchFalseF(scratch, &nan);
+
+  if (kind == MaxMinKind::kMax) {
+    if (std::is_same<float, F_TYPE>::value) {
+      fmax_s(dst, src1, src2);
+    } else {
+      fmax_d(dst, src1, src2);
+    }
+  } else {
+    if (std::is_same<float, F_TYPE>::value) {
+      fmin_s(dst, src1, src2);
+    } else {
+      fmin_d(dst, src1, src2);
+    }
+  }
+  jump(&done);
+
+  bind(&nan);
+  // if any operand is NaN, return NaN (fadd returns NaN if any operand is NaN)
+  if (std::is_same<float, F_TYPE>::value) {
+    fadd_s(dst, src1, src2);
+  } else {
+    fadd_d(dst, src1, src2);
+  }
+
+  bind(&done);
+}
+
+// Public min/max entry points; each simply instantiates FloatMinMaxHelper
+// for the right precision and direction.
+
+void MacroAssemblerRiscv64::Float32Max(FPURegister dst, FPURegister src1,
+                                       FPURegister src2) {
+  comment(__FUNCTION__);
+  FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void MacroAssemblerRiscv64::Float32Min(FPURegister dst, FPURegister src1,
+                                       FPURegister src2) {
+  comment(__FUNCTION__);
+  FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+void MacroAssemblerRiscv64::Float64Max(FPURegister dst, FPURegister src1,
+                                       FPURegister src2) {
+  comment(__FUNCTION__);
+  FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void MacroAssemblerRiscv64::Float64Min(FPURegister dst, FPURegister src1,
+                                       FPURegister src2) {
+  comment(__FUNCTION__);
+  FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+// Branch on an FP-comparison flag previously materialized into |rs|
+// (0 = false, nonzero = true).  The Short forms emit a single conditional
+// branch; the long forms invert the test and jump far when the bound
+// target is out of short-branch range.
+
+void MacroAssemblerRiscv64::BranchTrueShortF(Register rs, Label* target) {
+  ma_branch(target, NotEqual, rs, Operand(zero_reg));
+}
+
+void MacroAssemblerRiscv64::BranchFalseShortF(Register rs, Label* target) {
+  ma_branch(target, Equal, rs, Operand(zero_reg));
+}
+
+void MacroAssemblerRiscv64::BranchTrueF(Register rs, Label* target) {
+  // Unbound labels get a short branch; the assembler patches them later.
+  bool long_branch = target->bound() ? !is_near(target) : false;
+  if (long_branch) {
+    Label skip;
+    BranchFalseShortF(rs, &skip);
+    BranchLong(target);
+    bind(&skip);
+  } else {
+    BranchTrueShortF(rs, target);
+  }
+}
+
+void MacroAssemblerRiscv64::BranchFalseF(Register rs, Label* target) {
+  bool long_branch = target->bound() ? !is_near(target) : false;
+  if (long_branch) {
+    Label skip;
+    BranchTrueShortF(rs, &skip);
+    BranchLong(target);
+    bind(&skip);
+  } else {
+    BranchFalseShortF(rs, target);
+  }
+}
+
+// 32-bit rotate right of |rs| by |rt| (register or immediate) into |rd|,
+// synthesized from a shift pair plus OR (no Zbb ror assumed); the result
+// is sign-extended to canonical RV64 form.
+void MacroAssemblerRiscv64::Ror(Register rd, Register rs, const Operand& rt) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+  if (rt.is_reg()) {
+    // Left-shift amount is (32 - rt) mod 32 == (-rt) mod 32; sllw/srlw
+    // only read the low 5 bits, so negw gives the right count.
+    negw(scratch, rt.rm());
+    sllw(scratch, rs, scratch);
+    srlw(rd, rs, rt.rm());
+    or_(rd, scratch, rd);
+    sext_w(rd, rd);
+  } else {
+    // Normalize the immediate rotate count into [0, 32).
+    int64_t ror_value = rt.immediate() % 32;
+    if (ror_value == 0) {
+      mv(rd, rs);
+      return;
+    } else if (ror_value < 0) {
+      ror_value += 32;
+    }
+    srliw(scratch, rs, ror_value);
+    slliw(rd, rs, 32 - ror_value);
+    or_(rd, scratch, rd);
+    sext_w(rd, rd);
+  }
+}
+
+// 64-bit rotate right of |rs| by |rt| (register or immediate) into |rd|;
+// same shift-pair synthesis as Ror but on the full doubleword.
+void MacroAssemblerRiscv64::Dror(Register rd, Register rs, const Operand& rt) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+  if (rt.is_reg()) {
+    // sll/srl read only the low 6 bits, so negw yields (64 - rt) mod 64.
+    negw(scratch, rt.rm());
+    sll(scratch, rs, scratch);
+    srl(rd, rs, rt.rm());
+    or_(rd, scratch, rd);
+  } else {
+    // Normalize the immediate rotate count into [0, 64).
+    int64_t dror_value = rt.immediate() % 64;
+    if (dror_value == 0) {
+      mv(rd, rs);
+      return;
+    } else if (dror_value < 0) {
+      dror_value += 64;
+    }
+    srli(scratch, rs, dror_value);
+    slli(rd, rs, 64 - dror_value);
+    or_(rd, scratch, rd);
+  }
+}
+
+// Emit a wasm heap load of |access.type()| from memoryBase+ptr(+offset)
+// into |output|, with memory barriers around it and the faulting
+// instruction's offset recorded for trap metadata.
+// NOTE(review): |tmp| is unused here, and there is no Scalar::Int64 case —
+// presumably 64-bit integer loads go through a separate wasmLoadI64 path;
+// confirm at the callers.
+void MacroAssemblerRiscv64::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
+                                         Register memoryBase, Register ptr,
+                                         Register ptrScratch,
+                                         AnyRegister output, Register tmp) {
+  uint32_t offset = access.offset();
+  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+  // Maybe add the offset.
+  if (offset) {
+    asMasm().addPtr(ImmWord(offset), ptrScratch);
+    ptr = ptrScratch;
+  }
+
+  asMasm().memoryBarrierBefore(access.sync());
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  // Each case computes the effective address then issues exactly one
+  // memory instruction, so the append() below points at the access itself.
+  switch (access.type()) {
+    case Scalar::Int8:
+      add(scratch, memoryBase, ptr);
+      lb(output.gpr(), scratch, 0);
+      break;
+    case Scalar::Uint8:
+      add(scratch, memoryBase, ptr);
+      lbu(output.gpr(), scratch, 0);
+      break;
+    case Scalar::Int16:
+      add(scratch, memoryBase, ptr);
+      lh(output.gpr(), scratch, 0);
+      break;
+    case Scalar::Uint16:
+      add(scratch, memoryBase, ptr);
+      lhu(output.gpr(), scratch, 0);
+      break;
+    case Scalar::Int32:
+      add(scratch, memoryBase, ptr);
+      lw(output.gpr(), scratch, 0);
+      break;
+    case Scalar::Uint32:
+      add(scratch, memoryBase, ptr);
+      lwu(output.gpr(), scratch, 0);
+      break;
+    case Scalar::Float64:
+      add(scratch, memoryBase, ptr);
+      fld(output.fpu(), scratch, 0);
+      break;
+    case Scalar::Float32:
+      add(scratch, memoryBase, ptr);
+      flw(output.fpu(), scratch, 0);
+      break;
+    default:
+      MOZ_CRASH("unexpected array type");
+  }
+
+  // Record the PC of the load (last 4-byte instruction) for trap handling.
+  asMasm().append(access, asMasm().size() - 4);
+  asMasm().memoryBarrierAfter(access.sync());
+}
+
+// Emit a wasm heap store of |value| (|access.type()|) to
+// memoryBase+ptr(+offset), with memory barriers and trap metadata recorded
+// for the store instruction.  |tmp| is unused here.
+void MacroAssemblerRiscv64::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
+                                          AnyRegister value,
+                                          Register memoryBase, Register ptr,
+                                          Register ptrScratch, Register tmp) {
+  uint32_t offset = access.offset();
+  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+  // Maybe add the offset.
+  if (offset) {
+    asMasm().addPtr(ImmWord(offset), ptrScratch);
+    ptr = ptrScratch;
+  }
+
+  // Classify the access: float stores pick fsw/fsd by byteSize; integer
+  // stores go through ma_store with size/extension derived below.
+  unsigned byteSize = access.byteSize();
+  bool isSigned;
+  bool isFloat = false;
+
+  switch (access.type()) {
+    case Scalar::Int8:
+      isSigned = true;
+      break;
+    case Scalar::Uint8:
+      isSigned = false;
+      break;
+    case Scalar::Int16:
+      isSigned = true;
+      break;
+    case Scalar::Uint16:
+      isSigned = false;
+      break;
+    case Scalar::Int32:
+      isSigned = true;
+      break;
+    case Scalar::Uint32:
+      isSigned = false;
+      break;
+    case Scalar::Int64:
+      isSigned = true;
+      break;
+    case Scalar::Float64:
+      isFloat = true;
+      break;
+    case Scalar::Float32:
+      isFloat = true;
+      break;
+    default:
+      MOZ_CRASH("unexpected array type");
+  }
+
+  BaseIndex address(memoryBase, ptr, TimesOne);
+  asMasm().memoryBarrierBefore(access.sync());
+  if (isFloat) {
+    if (byteSize == 4) {
+      asMasm().ma_fst_s(value.fpu(), address);
+    } else {
+      asMasm().ma_fst_d(value.fpu(), address);
+    }
+  } else {
+    asMasm().ma_store(value.gpr(), address,
+                      static_cast<LoadStoreSize>(8 * byteSize),
+                      isSigned ? SignExtend : ZeroExtend);
+  }
+  // Only the last emitted instruction is a memory access.
+  asMasm().append(access, asMasm().size() - 4);
+  asMasm().memoryBarrierAfter(access.sync());
+}
+
+// Emit an auipc/jalr pair jumping to PC + imm32 with the return address in
+// |rd|.  The +0x800 rounds the high part so the sign-extended low 12 bits
+// recombine exactly to imm32.
+void MacroAssemblerRiscv64::GenPCRelativeJumpAndLink(Register rd,
+                                                     int32_t imm32) {
+  MOZ_ASSERT(is_int32(imm32 + 0x800));
+  int32_t Hi20 = ((imm32 + 0x800) >> 12);
+  int32_t Lo12 = imm32 << 20 >> 20;  // sign-extended low 12 bits
+  auipc(rd, Hi20);  // Read PC + Hi20 into scratch.
+  jalr(rd, Lo12);   // jump PC + Hi20 + Lo12
+}
+
+// Branch-and-link helpers: the Short forms emit a single jal (21-bit
+// range); BranchAndLink picks the long form only for bound labels that are
+// out of range (unbound labels get the short form and are patched later).
+
+void MacroAssemblerRiscv64::BranchAndLinkShortHelper(int32_t offset, Label* L) {
+  // Exactly one of |offset| / |L| is meaningful.
+  MOZ_ASSERT(L == nullptr || offset == 0);
+  offset = GetOffset(offset, L, OffsetSize::kOffset21);
+  jal(offset);
+}
+
+void MacroAssemblerRiscv64::BranchAndLinkShort(int32_t offset) {
+  MOZ_ASSERT(is_int21(offset));
+  BranchAndLinkShortHelper(offset, nullptr);
+}
+
+void MacroAssemblerRiscv64::BranchAndLinkShort(Label* L) {
+  BranchAndLinkShortHelper(0, L);
+}
+
+void MacroAssemblerRiscv64::BranchAndLink(Label* L) {
+  if (L->bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L);
+    } else {
+      BranchAndLinkLong(L);
+    }
+  } else {
+    BranchAndLinkShort(L);
+  }
+}
+
+// Bit-preserving moves between FP registers and the GPR holding a boxed
+// Value: _d variants move all 64 bits, _w variants the low 32 bits (NaN-
+// boxed per the fmv.x.w/fmv.w.x semantics).
+
+void MacroAssemblerRiscv64::ma_fmv_d(FloatRegister src, ValueOperand dest) {
+  fmv_x_d(dest.valueReg(), src);
+}
+
+void MacroAssemblerRiscv64::ma_fmv_d(ValueOperand src, FloatRegister dest) {
+  fmv_d_x(dest, src.valueReg());
+}
+
+void MacroAssemblerRiscv64::ma_fmv_w(FloatRegister src, ValueOperand dest) {
+  fmv_x_w(dest.valueReg(), src);
+}
+
+void MacroAssemblerRiscv64::ma_fmv_w(ValueOperand src, FloatRegister dest) {
+  fmv_w_x(dest, src.valueReg());
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64.h b/js/src/jit/riscv64/MacroAssembler-riscv64.h
new file mode 100644
index 0000000000..f0e567ece7
--- /dev/null
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64.h
@@ -0,0 +1,1224 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_MacroAssembler_riscv64_h
+#define jit_riscv64_MacroAssembler_riscv64_h
+
+#include <iterator>
+
+#include "jit/MoveResolver.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+static Register CallReg = t6;
+
+enum LiFlags {
+ Li64 = 0,
+ Li48 = 1,
+};
+
+class CompactBufferReader;
+enum LoadStoreSize {
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 };
+enum JumpKind { LongJump = 0, ShortJump = 1 };
+enum FloatFormat { SingleFloat, DoubleFloat };
+class ScratchTagScope : public ScratchRegisterScope {
+ public:
+ ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
+ : ScratchRegisterScope(masm) {}
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+class MacroAssemblerRiscv64 : public Assembler {
+ public:
+ MacroAssemblerRiscv64() {}
+
+#ifdef JS_SIMULATOR_RISCV64
+  // See riscv64/constant/Base-constant-riscv.h DebugParameters.
+ void Debug(uint32_t parameters) { break_(parameters, false); }
+#endif
+
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ MoveResolver moveResolver_;
+
+ static bool SupportsFloatingPoint() { return true; }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsFastUnalignedFPAccesses() { return true; }
+ void haltingAlign(int alignment) {
+    // TODO(riscv64): Implement a proper halting align.
+ nopAlign(alignment);
+ }
+
+ // TODO(RISCV) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
+ inline void GenPCRelativeJump(Register rd, int32_t imm32) {
+ MOZ_ASSERT(is_int32(imm32 + 0x800));
+ int32_t Hi20 = ((imm32 + 0x800) >> 12);
+ int32_t Lo12 = imm32 << 20 >> 20;
+ auipc(rd, Hi20); // Read PC + Hi20 into scratch.
+ jr(rd, Lo12); // jump PC + Hi20 + Lo12
+ }
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_loadDouble(FloatRegister dest, Address address);
+ void ma_loadFloat(FloatRegister dest, Address address);
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_storeDouble(FloatRegister dest, Address address);
+ void ma_storeFloat(FloatRegister dest, Address address);
+ void ma_liPatchable(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+ void ma_li(Register dest, ImmGCPtr ptr);
+ void ma_li(Register dest, Imm32 imm);
+ void ma_li(Register dest, Imm64 imm);
+ void ma_li(Register dest, intptr_t imm) { RV_li(dest, imm); }
+ void ma_li(Register dest, CodeLabel* label);
+ void ma_li(Register dest, ImmWord imm);
+
+ // branches when done from within la-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void BranchAndLinkShort(Label* L);
+ void BranchAndLink(Label* label);
+ void BranchAndLinkShort(int32_t offset);
+ void BranchAndLinkShortHelper(int32_t offset, Label* L);
+ void BranchAndLinkLong(Label* L);
+ void GenPCRelativeJumpAndLink(Register rd, int32_t imm32);
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rj, Operand rt); \
+ void instr(Register rd, Register rj, Imm32 imm) { \
+ instr(rd, rj, Operand(imm.value)); \
+ } \
+ void instr(Register rd, Imm32 imm) { instr(rd, rd, Operand(imm.value)); } \
+ void instr(Register rd, Register rs) { instr(rd, rd, Operand(rs)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rs, const Operand& rt); \
+ void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
+ void instr(Register rs, Imm32 j) { instr(rs, Operand(j.value)); }
+
+ DEFINE_INSTRUCTION(ma_and);
+ DEFINE_INSTRUCTION(ma_or);
+ DEFINE_INSTRUCTION(ma_xor);
+ DEFINE_INSTRUCTION(ma_nor);
+ DEFINE_INSTRUCTION(ma_sub32)
+ DEFINE_INSTRUCTION(ma_sub64)
+ DEFINE_INSTRUCTION(ma_add32)
+ DEFINE_INSTRUCTION(ma_add64)
+ DEFINE_INSTRUCTION(ma_div32)
+ DEFINE_INSTRUCTION(ma_divu32)
+ DEFINE_INSTRUCTION(ma_div64)
+ DEFINE_INSTRUCTION(ma_divu64)
+ DEFINE_INSTRUCTION(ma_mod32)
+ DEFINE_INSTRUCTION(ma_modu32)
+ DEFINE_INSTRUCTION(ma_mod64)
+ DEFINE_INSTRUCTION(ma_modu64)
+ DEFINE_INSTRUCTION(ma_mul32)
+ DEFINE_INSTRUCTION(ma_mulh32)
+ DEFINE_INSTRUCTION(ma_mulhu32)
+ DEFINE_INSTRUCTION(ma_mul64)
+ DEFINE_INSTRUCTION(ma_mulh64)
+ DEFINE_INSTRUCTION(ma_sll64)
+ DEFINE_INSTRUCTION(ma_sra64)
+ DEFINE_INSTRUCTION(ma_srl64)
+ DEFINE_INSTRUCTION(ma_sll32)
+ DEFINE_INSTRUCTION(ma_sra32)
+ DEFINE_INSTRUCTION(ma_srl32)
+ DEFINE_INSTRUCTION(ma_slt)
+ DEFINE_INSTRUCTION(ma_sltu)
+ DEFINE_INSTRUCTION(ma_sle)
+ DEFINE_INSTRUCTION(ma_sleu)
+ DEFINE_INSTRUCTION(ma_sgt)
+ DEFINE_INSTRUCTION(ma_sgtu)
+ DEFINE_INSTRUCTION(ma_sge)
+ DEFINE_INSTRUCTION(ma_sgeu)
+ DEFINE_INSTRUCTION(ma_seq)
+ DEFINE_INSTRUCTION(ma_sne)
+
+ DEFINE_INSTRUCTION2(ma_seqz)
+ DEFINE_INSTRUCTION2(ma_snez)
+ DEFINE_INSTRUCTION2(ma_neg);
+
+#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION
+ // arithmetic based ops
+ void ma_add32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // multiplies. For now, there are only few that we care about.
+ void ma_mulPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+
+ // branches when done from within la-specific code
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ UseScratchRegisterScope temps(this);
+ Register ScratchRegister = temps.Acquire();
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, l, c, jumpKind);
+ }
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(rhs != scratch);
+ ma_load(scratch, addr, SizeDouble);
+ ma_b(scratch, rhs, l, c, jumpKind);
+ }
+
+ void ma_branch(Label* target, Condition cond, Register r1, const Operand& r2,
+ JumpKind jumpKind = ShortJump);
+
+ void ma_branch(Label* target, JumpKind jumpKind = ShortJump) {
+ ma_branch(target, Always, zero, zero, jumpKind);
+ }
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+
+ void ma_fst_d(FloatRegister src, BaseIndex address);
+ void ma_fst_s(FloatRegister src, BaseIndex address);
+
+ void ma_fld_d(FloatRegister dest, const BaseIndex& src);
+ void ma_fld_s(FloatRegister dest, const BaseIndex& src);
+
+ void ma_fmv_d(FloatRegister src, ValueOperand dest);
+ void ma_fmv_d(ValueOperand src, FloatRegister dest);
+
+ void ma_fmv_w(FloatRegister src, ValueOperand dest);
+ void ma_fmv_w(ValueOperand src, FloatRegister dest);
+
+ void ma_fld_s(FloatRegister ft, Address address);
+ void ma_fld_d(FloatRegister ft, Address address);
+ void ma_fst_d(FloatRegister ft, Address address);
+ void ma_fst_s(FloatRegister ft, Address address);
+
+ // stack
+ void ma_pop(Register r);
+ void ma_push(Register r);
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+ Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c);
+
+ void ma_rotr_w(Register rd, Register rj, Imm32 shift);
+
+ void ma_fmovz(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+ void ma_fmovn(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+
+ // arithmetic based ops
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ void MulOverflow32(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // multiplies. For now, there are only few that we care about.
+ void ma_mul32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_mul32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // fast mod, uses scratch registers, and thus needs to be in the assembler
+ // implicitly assumes that we can overwrite dest at the beginning of the
+ // sequence
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+ // FP branches
+ void ma_compareF32(Register rd, DoubleCondition cc, FloatRegister cmp1,
+ FloatRegister cmp2);
+ void ma_compareF64(Register rd, DoubleCondition cc, FloatRegister cmp1,
+ FloatRegister cmp2);
+
+ void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void jump(Label* label) { ma_branch(label); }
+ void jump(Register reg) { jr(reg); }
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void BranchShort(Label* L);
+
+ void BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt);
+ void BranchShort(Label* L, Condition cond, Register rs, const Operand& rt);
+ void BranchShortHelper(int32_t offset, Label* L);
+ bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+ void BranchLong(Label* L);
+
+ // Floating point branches
+ void BranchTrueShortF(Register rs, Label* target);
+ void BranchFalseShortF(Register rs, Label* target);
+
+ void BranchTrueF(Register rs, Label* target);
+ void BranchFalseF(Register rs, Label* target);
+
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ fmv_x_d(dest, src);
+ srli(dest, dest, 32);
+ }
+ // Bit field starts at bit pos and extending for size bits is extracted from
+ // rs and stored zero/sign-extended and right-justified in rt
+ void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size,
+ bool sign_extend = false);
+ void ExtractBits(Register dest, Register source, Register pos, int size,
+ bool sign_extend = false) {
+ sra(dest, source, pos);
+ ExtractBits(dest, dest, 0, size, sign_extend);
+ }
+
+ // Insert bits [0, size) of source to bits [pos, pos+size) of dest
+ void InsertBits(Register dest, Register source, Register pos, int size);
+
+ // Insert bits [0, size) of source to bits [pos, pos+size) of dest
+ void InsertBits(Register dest, Register source, int pos, int size);
+
+ template <typename F_TYPE>
+ void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+
+ template <typename TruncFunc>
+ void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
+ TruncFunc trunc, bool Inexact = false);
+
+ void Clear_if_nan_d(Register rd, FPURegister fs);
+ void Clear_if_nan_s(Register rd, FPURegister fs);
+ // Convert double to unsigned word.
+ void Trunc_uw_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert double to signed word.
+ void Trunc_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert double to unsigned long.
+ void Trunc_ul_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+  // Convert double to signed long.
+ void Trunc_l_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert single to signed word.
+ void Trunc_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Convert single to unsigned long.
+ void Trunc_ul_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+  // Convert single to signed long.
+ void Trunc_l_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Round double functions
+ void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+
+ // Round float functions
+ void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+
+ // Round single to signed word.
+ void Round_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Round double to signed word.
+ void Round_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Ceil single to signed word.
+ void Ceil_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Ceil double to signed word.
+ void Ceil_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Floor single to signed word.
+ void Floor_w_s(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ // Floor double to signed word.
+ void Floor_w_d(Register rd, FPURegister fs, Register result = InvalidReg,
+ bool Inexact = false);
+
+ void Clz32(Register rd, Register rs);
+ void Ctz32(Register rd, Register rs);
+ void Popcnt32(Register rd, Register rs, Register scratch);
+
+ void Popcnt64(Register rd, Register rs, Register scratch);
+ void Ctz64(Register rd, Register rs);
+ void Clz64(Register rd, Register rs);
+
+ // Change endianness
+ void ByteSwap(Register dest, Register src, int operand_size,
+ Register scratch);
+
+ void Ror(Register rd, Register rs, const Operand& rt);
+ void Dror(Register rd, Register rs, const Operand& rt);
+
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2);
+
+ template <typename F>
+ void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2,
+ MaxMinKind kind);
+
+ inline void NegateBool(Register rd, Register rs) { xori(rd, rs, 1); }
+
+ protected:
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister output,
+ Register tmp);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+};
+
+class MacroAssemblerRiscv64Compat : public MacroAssemblerRiscv64 {
+ public:
+ using MacroAssemblerRiscv64::call;
+
+ MacroAssemblerRiscv64Compat() {}
+
+ void convertBoolToInt32(Register src, Register dest) {
+ ma_and(dest, src, Imm32(0xff));
+ };
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ fcvt_d_w(dest, src);
+ };
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_load(scratch, src, SizeWord, SignExtend);
+ fcvt_d_w(dest, scratch);
+ };
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(scratch != src.base);
+ MOZ_ASSERT(scratch != src.index);
+ computeScaledAddress(src, scratch);
+ convertInt32ToDouble(Address(scratch, src.offset), dest);
+ };
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void movq(Register rj, Register rd);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_add64(dest, address.base, Imm32(address.offset));
+ }
+
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ ma_add64(dest, dest, Imm32(address.offset));
+ }
+ }
+
+ void j(Label* dest) { ma_branch(dest); }
+
+ void mov(Register src, Register dest) { addi(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void writeDataRelocation(const Value& val) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(currentOffset());
+ }
+ }
+
+ void branch(JitCode* c) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 7);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ jr(scratch);
+ }
+ void branch(const Register reg) { jr(reg); }
+ void ret() {
+ ma_pop(ra);
+ jalr(zero_reg, ra, 0);
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmWord imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmGCPtr imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(const Address& address) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(address, scratch);
+ ma_push(scratch);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+  // Emit a branch that can be toggled to a non-operation. On RISCV64 we use
+  // "andi" instruction to toggle the branch.
+  // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+    // Seven instructions used in: MacroAssemblerRiscv64Compat::toggledCall
+ return 7 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ CodeOffset offset = movWithPatch(imm, scratch);
+ ma_push(scratch);
+ return offset;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 8);
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm, Li64);
+ return offset;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ BlockTrampolinePoolScope block_trampoline_pool(this, 6);
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return offset;
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ label->patchAt()->bind(currentOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ m_buffer.ensureSpace(sizeof(void*));
+ emit(uint32_t(-1));
+ emit(uint32_t(-1));
+ }
+
+ void jump(Label* label) { ma_branch(label); }
+ void jump(Register reg) { jr(reg); }
+ void jump(const Address& address) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ loadPtr(address, scratch);
+ jr(scratch);
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void splitTag(Register src, Register dest) {
+ srli(dest, src, JSVAL_TAG_SHIFT);
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ splitTag(value, tag);
+ }
+
+ void moveIfZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ Label done;
+ ma_branch(&done, NotEqual, cond, zero);
+ mv(dst, src);
+ bind(&done);
+ }
+
+ void moveIfNotZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ Label done;
+ ma_branch(&done, Equal, cond, zero);
+ mv(dst, src);
+ bind(&done);
+ }
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest,
+ JSValueType type) {
+ unboxNonDouble(operand.valueReg(), dest, type);
+ }
+
+ template <typename T>
+ void unboxNonDouble(T src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ load32(src, dest);
+ return;
+ }
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, type);
+ }
+
+ void unboxNonDouble(Register src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ slliw(dest, src, 0);
+ return;
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MOZ_ASSERT(scratch != src);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ xor_(dest, src, scratch);
+ }
+
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ static_assert(JS::detail::ValueObjectOrNullBit ==
+ (uint64_t(0x8) << JSVAL_TAG_SHIFT));
+ InsertBits(dest, zero, JSVAL_TAG_SHIFT + 3, 1);
+ }
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ ExtractBits(dest, dest, 0, JSVAL_TAG_SHIFT - 1);
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ ExtractBits(dest, src.valueReg(), 0, JSVAL_TAG_SHIFT - 1);
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != dest);
+ loadPtr(src, dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch);
+ and_(dest, dest, scratch);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ and_(dest, dest, src.valueReg());
+ }
+
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(Register src, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxInt32(const BaseIndex& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(Register src, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxBoolean(const BaseIndex& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(Register src, Register dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(Register src, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxSymbol(const ValueOperand& src, Register dest);
+ void unboxSymbol(Register src, Register dest);
+ void unboxSymbol(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(Register src, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(Register src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);
+
+ void notBoolean(const ValueOperand& val) {
+ xori(val.valueReg(), val.valueReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ unboxString(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ // higher level tag testing code
+ Address ToPayload(Address value) { return value; }
+
+ template <typename T>
+ void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+  void storeUnboxedPayload(ValueOperand value, BaseIndex address, size_t nbytes,
+                           JSValueType type) {
+    switch (nbytes) {
+      case 8: {  // full 64-bit payload: unbox first, then store the doubleword
+        UseScratchRegisterScope temps(this);
+        Register scratch = temps.Acquire();
+        Register scratch2 = temps.Acquire();
+        if (type == JSVAL_TYPE_OBJECT) {
+          unboxObjectOrNull(value, scratch2);  // presumably also accepts a null payload — see unboxObjectOrNull
+        } else {
+          unboxNonDouble(value, scratch2, type);
+        }
+        computeEffectiveAddress(address, scratch);  // BaseIndex must be flattened before sd
+        sd(scratch2, scratch, 0);
+        return;
+      }
+      case 4:  // low 32 bits of the boxed value are the payload
+        store32(value.valueReg(), address);
+        return;
+      case 1:  // low byte of the boxed value is the payload
+        store8(value.valueReg(), address);
+        return;
+      default:
+        MOZ_CRASH("Bad payload width");
+    }
+  }
+
+  void storeUnboxedPayload(ValueOperand value, Address address, size_t nbytes,
+                           JSValueType type) {
+    switch (nbytes) {
+      case 8: {  // full 64-bit payload: unbox into a scratch, then store
+        UseScratchRegisterScope temps(this);
+        Register scratch = temps.Acquire();
+        if (type == JSVAL_TYPE_OBJECT) {
+          unboxObjectOrNull(value, scratch);  // presumably also accepts a null payload — see unboxObjectOrNull
+        } else {
+          unboxNonDouble(value, scratch, type);
+        }
+        storePtr(scratch, address);  // plain Address: no effective-address step needed
+        return;
+      }
+      case 4:  // low 32 bits of the boxed value are the payload
+        store32(value.valueReg(), address);
+        return;
+      case 1:  // low byte of the boxed value is the payload
+        store8(value.valueReg(), address);
+        return;
+      default:
+        MOZ_CRASH("Bad payload width");
+    }
+  }
+
+  void boxValue(JSValueType type, Register src, Register dest) {
+    MOZ_ASSERT(src != dest);  // dest is built up before src is merged in
+
+    JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
+    ma_li(dest, Imm32(tag));
+    slli(dest, dest, JSVAL_TAG_SHIFT);  // move the tag into the high bits
+    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+      InsertBits(dest, src, 0, 32);  // 32-bit payloads occupy a full 32 bits
+    } else {
+      InsertBits(dest, src, 0, JSVAL_TAG_SHIFT);  // pointer payloads fill everything below the tag
+    }
+  }
+
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(const BaseIndex& src, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+  void pushValue(const Value& val) {
+    if (val.isGCThing()) {
+      UseScratchRegisterScope temps(this);
+      Register scratch = temps.Acquire();
+      writeDataRelocation(val);  // record the GC pointer so the GC can trace/update it
+      movWithPatch(ImmWord(val.asRawBits()), scratch);  // patchable materialization of the boxed bits
+      push(scratch);
+    } else {
+      push(ImmWord(val.asRawBits()));  // non-GC values can be pushed as a plain immediate
+    }
+  }
+  void pushValue(JSValueType type, Register reg) {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    boxValue(type, reg, scratch);  // tag the payload before pushing
+    push(scratch);
+  }
+  void pushValue(const Address& addr);
+  void pushValue(const BaseIndex& addr, Register scratch) {
+    loadValue(addr, ValueOperand(scratch));  // caller-provided scratch holds the boxed value
+    pushValue(ValueOperand(scratch));
+  }
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ load16SignExtend(src, dest);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ void SignExtendByte(Register rd, Register rs) {
+ slli(rd, rs, xlen - 8);
+ srai(rd, rd, xlen - 8);
+ }
+
+ void SignExtendShort(Register rd, Register rs) {
+ slli(rd, rs, xlen - 16);
+ srai(rd, rd, xlen - 16);
+ }
+
+ void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); }
+ void ZeroExtendWord(Register rd, Register rs) {
+ slli(rd, rs, 32);
+ srli(rd, rd, 32);
+ }
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ load16ZeroExtend(src, dest);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ load32(src, dest);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+
+ void loadDouble(const Address& addr, FloatRegister dest) {
+ ma_loadDouble(dest, addr);
+ }
+ void loadDouble(const BaseIndex& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ computeScaledAddress(src, scratch);
+ fld(dest, scratch, 0);
+ }
+
+ void loadFloat32(const Address& addr, FloatRegister dest) {
+ ma_loadFloat(dest, addr);
+ }
+ void loadFloat32(const BaseIndex& src, FloatRegister dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ computeScaledAddress(src, scratch);
+ flw(dest, scratch, 0);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ load64(src, dest);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ store16(src, dest);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ // NOTE: This will use second scratch on LOONG64. Only ARM needs the
+ // implementation without second scratch.
+ void store32_NoSecondScratch(Imm32 src, const Address& address) {
+ store32(src, address);
+ }
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ store32(src, dest);
+ }
+
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void store64(Register64 src, Address address) { storePtr(src.reg, address); }
+ void store64(Register64 src, const BaseIndex& address) {
+ storePtr(src.reg, address);
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ store64(src, dest);
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) { fmv_d(dest, src); }
+
+ void zeroDouble(FloatRegister reg) { fmv_d_x(reg, zero); }
+
+ void convertUInt64ToDouble(Register src, FloatRegister dest);
+
+ void breakpoint(uint32_t value = 0);
+
+  void checkStackAlignment() {
+#ifdef DEBUG
+    Label aligned;
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    andi(scratch, sp, ABIStackAlignment - 1);  // low bits of sp must be zero when aligned
+    ma_b(scratch, zero, &aligned, Equal, ShortJump);
+    breakpoint();  // debug builds trap on a misaligned stack
+    bind(&aligned);
+#endif
+  };
+
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs,
+ Register dest);
+
+ void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+
+ public:
+ void abiret() { jr(ra); }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) { fmv_s(dest, src); }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerRiscv64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_MacroAssembler_riscv64_h */
diff --git a/js/src/jit/riscv64/MoveEmitter-riscv64.cpp b/js/src/jit/riscv64/MoveEmitter-riscv64.cpp
new file mode 100644
index 0000000000..79f8d176b2
--- /dev/null
+++ b/js/src/jit/riscv64/MoveEmitter-riscv64.cpp
@@ -0,0 +1,333 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/riscv64/MoveEmitter-riscv64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterRiscv64::breakCycle(const MoveOperand& from,
+                                    const MoveOperand& to, MoveOp::Type type,
+                                    uint32_t slotId) {
+  // There is some pattern:
+  // (A -> B)
+  // (B -> A)
+  //
+  // This case handles (A -> B), which we reach first. We save B, then allow
+  // the original move to continue.
+  switch (type) {
+    case MoveOp::FLOAT32:
+      if (to.isMemory()) {
+        ScratchFloat32Scope fpscratch32(masm);
+        masm.loadFloat32(getAdjustedAddress(to), fpscratch32);
+        masm.storeFloat32(fpscratch32, cycleSlot(slotId));
+      } else {
+        masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
+      }
+      break;
+    case MoveOp::DOUBLE:
+      if (to.isMemory()) {
+        ScratchDoubleScope fpscratch64(masm);
+        masm.loadDouble(getAdjustedAddress(to), fpscratch64);
+        masm.storeDouble(fpscratch64, cycleSlot(slotId));
+      } else {
+        masm.storeDouble(to.floatReg(), cycleSlot(slotId));
+      }
+      break;
+    case MoveOp::INT32:
+      if (to.isMemory()) {
+        UseScratchRegisterScope temps(&masm);
+        Register scratch2 = temps.Acquire();
+        masm.load32(getAdjustedAddress(to), scratch2);
+        masm.store32(scratch2, cycleSlot(0));  // NOTE(review): slot 0, not slotId — completeCycle asserts slotId == 0 for INT32
+      } else {
+        masm.store32(to.reg(), cycleSlot(0));  // NOTE(review): slot 0, not slotId — confirm intentional
+      }
+      break;
+    case MoveOp::GENERAL:
+      if (to.isMemory()) {
+        UseScratchRegisterScope temps(&masm);
+        Register scratch2 = temps.Acquire();
+        masm.loadPtr(getAdjustedAddress(to), scratch2);
+        masm.storePtr(scratch2, cycleSlot(0));  // NOTE(review): slot 0, not slotId — completeCycle asserts slotId == 0 for GENERAL
+      } else {
+        masm.storePtr(to.reg(), cycleSlot(0));  // NOTE(review): slot 0, not slotId — confirm intentional
+      }
+      break;
+    default:
+      MOZ_CRASH("Unexpected move type");
+  }
+}
+
+void MoveEmitterRiscv64::completeCycle(const MoveOperand& from,
+                                       const MoveOperand& to, MoveOp::Type type,
+                                       uint32_t slotId) {
+  // There is some pattern:
+  // (A -> B)
+  // (B -> A)
+  //
+  // This case handles (B -> A), which we reach last. We emit a move from the
+  // saved value of B, to A.
+  switch (type) {
+    case MoveOp::FLOAT32:
+      if (to.isMemory()) {
+        ScratchFloat32Scope fpscratch32(masm);
+        masm.loadFloat32(cycleSlot(slotId), fpscratch32);
+        masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+      } else {
+        masm.loadFloat32(cycleSlot(slotId), to.floatReg());
+      }
+      break;
+    case MoveOp::DOUBLE:
+      if (to.isMemory()) {
+        ScratchDoubleScope fpscratch64(masm);
+        masm.loadDouble(cycleSlot(slotId), fpscratch64);
+        masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+      } else {
+        masm.loadDouble(cycleSlot(slotId), to.floatReg());
+      }
+      break;
+    case MoveOp::INT32:
+      MOZ_ASSERT(slotId == 0);  // breakCycle always spilled INT32 into slot 0
+      if (to.isMemory()) {
+        UseScratchRegisterScope temps(&masm);
+        Register scratch2 = temps.Acquire();
+        masm.load32(cycleSlot(0), scratch2);
+        masm.store32(scratch2, getAdjustedAddress(to));
+      } else {
+        masm.load32(cycleSlot(0), to.reg());
+      }
+      break;
+    case MoveOp::GENERAL:
+      MOZ_ASSERT(slotId == 0);  // breakCycle always spilled GENERAL into slot 0
+      if (to.isMemory()) {
+        UseScratchRegisterScope temps(&masm);
+        Register scratch2 = temps.Acquire();
+        masm.loadPtr(cycleSlot(0), scratch2);
+        masm.storePtr(scratch2, getAdjustedAddress(to));
+      } else {
+        masm.loadPtr(cycleSlot(0), to.reg());
+      }
+      break;
+    default:
+      MOZ_CRASH("Unexpected move type");
+  }
+}
+
+void MoveEmitterRiscv64::emit(const MoveResolver& moves) {
+  if (moves.numCycles()) {
+    // Reserve stack for cycle resolution
+    static_assert(SpillSlotSize == 8);  // cycleSlot() indexes by sizeof(double)
+    masm.reserveStack(moves.numCycles() * SpillSlotSize);
+    pushedAtCycle_ = masm.framePushed();  // snapshot so cycleSlot() can re-derive offsets later
+  }
+
+  for (size_t i = 0; i < moves.numMoves(); i++) {
+    emit(moves.getMove(i));  // resolver has already ordered the moves
+  }
+}
+
+void MoveEmitterRiscv64::emit(const MoveOp& move) {
+  const MoveOperand& from = move.from();
+  const MoveOperand& to = move.to();
+
+  if (move.isCycleEnd() && move.isCycleBegin()) {
+    // A fun consequence of aliased registers is you can have multiple
+    // cycles at once, and one can end exactly where another begins.
+    breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+    completeCycle(from, to, move.type(), move.cycleEndSlot());
+    return;  // both halves handled; no ordinary move to emit
+  }
+
+  if (move.isCycleEnd()) {
+    MOZ_ASSERT(inCycle_);
+    completeCycle(from, to, move.type(), move.cycleEndSlot());  // restore the value spilled at cycle begin
+    MOZ_ASSERT(inCycle_ > 0);
+    inCycle_--;
+    return;
+  }
+
+  if (move.isCycleBegin()) {
+    breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());  // spill `to` before it is clobbered
+    inCycle_++;
+  }
+
+  switch (move.type()) {
+    case MoveOp::FLOAT32:
+      emitFloat32Move(from, to);
+      break;
+    case MoveOp::DOUBLE:
+      emitDoubleMove(from, to);
+      break;
+    case MoveOp::INT32:
+      emitInt32Move(from, to);
+      break;
+    case MoveOp::GENERAL:
+      emitMove(from, to);
+      break;
+    default:
+      MOZ_CRASH("Unexpected move type");
+  }
+}
+
+void MoveEmitterRiscv64::emitMove(const MoveOperand& from,
+                                  const MoveOperand& to) {
+  if (from.isGeneralReg()) {
+    if (to.isGeneralReg()) {
+      masm.movePtr(from.reg(), to.reg());  // reg -> reg
+    } else if (to.isMemory()) {
+      masm.storePtr(from.reg(), getAdjustedAddress(to));  // reg -> mem
+    } else {
+      MOZ_CRASH("Invalid emitMove arguments.");
+    }
+  } else if (from.isMemory()) {
+    if (to.isGeneralReg()) {
+      masm.loadPtr(getAdjustedAddress(from), to.reg());  // mem -> reg
+    } else if (to.isMemory()) {
+      UseScratchRegisterScope temps(&masm);
+      Register scratch2 = temps.Acquire();
+      masm.loadPtr(getAdjustedAddress(from), scratch2);  // mem -> mem via scratch
+      masm.storePtr(scratch2, getAdjustedAddress(to));
+    } else {
+      MOZ_CRASH("Invalid emitMove arguments.");
+    }
+  } else if (from.isEffectiveAddress()) {
+    if (to.isGeneralReg()) {
+      masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());  // lea-style: materialize the address itself
+    } else if (to.isMemory()) {
+      UseScratchRegisterScope temps(&masm);
+      Register scratch2 = temps.Acquire();
+      masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+      masm.storePtr(scratch2, getAdjustedAddress(to));
+    } else {
+      MOZ_CRASH("Invalid emitMove arguments.");
+    }
+  } else {
+    MOZ_CRASH("Invalid emitMove arguments.");
+  }
+}
+
+void MoveEmitterRiscv64::emitInt32Move(const MoveOperand& from,
+                                       const MoveOperand& to) {
+  if (from.isGeneralReg()) {
+    if (to.isGeneralReg()) {
+      masm.move32(from.reg(), to.reg());  // 32-bit reg -> reg
+    } else if (to.isMemory()) {
+      masm.store32(from.reg(), getAdjustedAddress(to));  // reg -> 32-bit mem slot
+    } else {
+      MOZ_CRASH("Invalid emitInt32Move arguments.");
+    }
+  } else if (from.isMemory()) {
+    if (to.isGeneralReg()) {
+      masm.load32(getAdjustedAddress(from), to.reg());  // mem -> reg
+    } else if (to.isMemory()) {
+      UseScratchRegisterScope temps(&masm);
+      Register scratch2 = temps.Acquire();
+      masm.load32(getAdjustedAddress(from), scratch2);  // mem -> mem via scratch
+      masm.store32(scratch2, getAdjustedAddress(to));
+    } else {
+      MOZ_CRASH("Invalid emitInt32Move arguments.");
+    }
+  } else if (from.isEffectiveAddress()) {
+    if (to.isGeneralReg()) {
+      masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());  // materialize the address itself
+    } else if (to.isMemory()) {
+      UseScratchRegisterScope temps(&masm);
+      Register scratch2 = temps.Acquire();
+      masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+      masm.store32(scratch2, getAdjustedAddress(to));  // only low 32 bits stored — consistent with INT32 type
+    } else {
+      MOZ_CRASH("Invalid emitInt32Move arguments.");
+    }
+  } else {
+    MOZ_CRASH("Invalid emitInt32Move arguments.");
+  }
+}
+
+void MoveEmitterRiscv64::emitFloat32Move(const MoveOperand& from,
+                                         const MoveOperand& to) {
+  if (from.isFloatReg()) {
+    if (to.isFloatReg()) {
+      masm.fmv_s(to.floatReg(), from.floatReg());  // fp -> fp
+    } else if (to.isGeneralReg()) {
+      // This should only be used when passing float parameter in a1,a2,a3
+      MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+      masm.fmv_x_w(to.reg(), from.floatReg());  // raw 32-bit fp bits into GPR
+    } else {
+      MOZ_ASSERT(to.isMemory());
+      masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+    }
+  } else if (to.isFloatReg()) {
+    MOZ_ASSERT(from.isMemory());
+    masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+  } else if (to.isGeneralReg()) {
+    MOZ_ASSERT(from.isMemory());
+    // This should only be used when passing float parameter in a1,a2,a3
+    MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+    masm.loadPtr(getAdjustedAddress(from), to.reg());  // NOTE(review): 64-bit load of a float32 slot — confirm slot width
+  } else {
+    MOZ_ASSERT(from.isMemory());
+    MOZ_ASSERT(to.isMemory());
+    ScratchFloat32Scope fpscratch32(masm);
+    masm.loadFloat32(getAdjustedAddress(from), fpscratch32);  // mem -> mem via fp scratch
+    masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+  }
+}
+
+void MoveEmitterRiscv64::emitDoubleMove(const MoveOperand& from,
+                                        const MoveOperand& to) {
+  if (from.isFloatReg()) {
+    if (to.isFloatReg()) {
+      masm.fmv_d(to.floatReg(), from.floatReg());  // fp -> fp
+    } else if (to.isGeneralReg()) {
+      masm.fmv_x_d(to.reg(), from.floatReg());  // raw 64-bit fp bits into GPR
+    } else {
+      MOZ_ASSERT(to.isMemory());
+      masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+    }
+  } else if (to.isFloatReg()) {
+    if (from.isMemory()) {
+      masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+    } else {
+      masm.fmv_d_x(to.floatReg(), from.reg());  // GPR bits reinterpreted as double
+    }
+  } else {
+    MOZ_ASSERT(from.isMemory());
+    MOZ_ASSERT(to.isMemory());
+    ScratchDoubleScope fpscratch64(masm);
+    masm.loadDouble(getAdjustedAddress(from), fpscratch64);  // mem -> mem via fp scratch
+    masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+  }
+}
+
+Address MoveEmitterRiscv64::cycleSlot(uint32_t slot, uint32_t subslot) const {
+  int32_t offset = masm.framePushed() - pushedAtCycle_;  // SP may have moved since the slots were reserved
+  return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t MoveEmitterRiscv64::getAdjustedOffset(const MoveOperand& operand) {
+  MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+  if (operand.base() != StackPointer) {
+    return operand.disp();  // non-SP bases are unaffected by frame pushes
+  }
+
+  // Adjust offset if stack pointer has been moved.
+  return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address MoveEmitterRiscv64::getAdjustedAddress(const MoveOperand& operand) {
+  return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+void MoveEmitterRiscv64::assertDone() { MOZ_ASSERT(inCycle_ == 0); }
+
+void MoveEmitterRiscv64::finish() {
+  assertDone();
+
+  masm.freeStack(masm.framePushed() - pushedAtStart_);  // release cycle spill slots reserved in emit()
+}
diff --git a/js/src/jit/riscv64/MoveEmitter-riscv64.h b/js/src/jit/riscv64/MoveEmitter-riscv64.h
new file mode 100644
index 0000000000..34d86b5794
--- /dev/null
+++ b/js/src/jit/riscv64/MoveEmitter-riscv64.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_MoveEmitter_riscv64_h
+#define jit_riscv64_MoveEmitter_riscv64_h
+
+#include "mozilla/Assertions.h"
+#include "jit/MacroAssembler.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+class MacroAssemblerRiscv64;
+class MoveResolver;
+struct Register;
+
+class MoveEmitterRiscv64 {
+  uint32_t inCycle_;  // number of cycles currently open (incremented in breakCycle paths)
+  MacroAssembler& masm;
+
+  // masm.framePushed() at construction; used to re-derive SP-relative offsets.
+  uint32_t pushedAtStart_;
+
+  // Stack offset of the cycle spill slots, snapshotting masm.framePushed() at
+  // the time they were allocated in emit(const MoveResolver&). -1 if no
+  // stack space has been allocated for cycle resolution.
+  int32_t pushedAtCycle_;
+
+  // Registers available for temporary use. They may be assigned InvalidReg.
+  // NOTE(review): both appear unused on riscv64 (always Invalid) — kept,
+  // presumably, for parity with other ports' MoveEmitters.
+  Register spilledReg_;
+  FloatRegister spilledFloatReg_;
+
+ public:
+  explicit MoveEmitterRiscv64(MacroAssembler& m)
+      : inCycle_(0),
+        masm(m),
+        pushedAtStart_(masm.framePushed()),
+        pushedAtCycle_(-1),
+        spilledReg_(InvalidReg),
+        spilledFloatReg_(InvalidFloatReg) {}
+  void emit(const MoveResolver&);
+  void emit(const MoveOp& move);
+  void emitMove(const MoveOperand& from, const MoveOperand& to);
+  void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+  void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+  void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+  void finish();
+  void assertDone();
+  void setScratchRegister(Register) { MOZ_CRASH("Unimplement on riscv"); }
+  Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;  // SP-relative address of a cycle spill slot
+  int32_t getAdjustedOffset(const MoveOperand& operand);  // compensates for SP movement since pushedAtStart_
+  Address getAdjustedAddress(const MoveOperand& operand);
+
+  void breakCycle(const MoveOperand& from, const MoveOperand& to,
+                  MoveOp::Type type, uint32_t slotId);
+  void completeCycle(const MoveOperand& from, const MoveOperand& to,
+                     MoveOp::Type type, uint32_t slot);
+};
+
+typedef MoveEmitterRiscv64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_MoveEmitter_riscv64_h */
diff --git a/js/src/jit/riscv64/Register-riscv64.h b/js/src/jit/riscv64/Register-riscv64.h
new file mode 100644
index 0000000000..54664dcf96
--- /dev/null
+++ b/js/src/jit/riscv64/Register-riscv64.h
@@ -0,0 +1,186 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_Register_riscv64_h
+#define jit_riscv64_Register_riscv64_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register zero{Registers::zero};
+static constexpr Register ra{Registers::ra};
+static constexpr Register tp{Registers::tp};
+static constexpr Register sp{Registers::sp};
+static constexpr Register gp{Registers::gp};
+static constexpr Register a0{Registers::a0};
+static constexpr Register a1{Registers::a1};
+static constexpr Register a2{Registers::a2};
+static constexpr Register a3{Registers::a3};
+static constexpr Register a4{Registers::a4};
+static constexpr Register a5{Registers::a5};
+static constexpr Register a6{Registers::a6};
+static constexpr Register a7{Registers::a7};
+static constexpr Register t0{Registers::t0};
+static constexpr Register t1{Registers::t1};
+static constexpr Register t2{Registers::t2};
+static constexpr Register t3{Registers::t3};
+static constexpr Register t4{Registers::t4};
+static constexpr Register t5{Registers::t5};
+static constexpr Register t6{Registers::t6};
+static constexpr Register fp{Registers::fp};
+static constexpr Register s1{Registers::s1};
+static constexpr Register s2{Registers::s2};
+static constexpr Register s3{Registers::s3};
+static constexpr Register s4{Registers::s4};
+static constexpr Register s5{Registers::s5};
+static constexpr Register s6{Registers::s6};
+static constexpr Register s7{Registers::s7};
+static constexpr Register s8{Registers::s8};
+static constexpr Register s9{Registers::s9};
+static constexpr Register s10{Registers::s10};
+static constexpr Register s11{Registers::s11};
+
+static constexpr FloatRegister ft0{FloatRegisters::f0};
+static constexpr FloatRegister ft1{FloatRegisters::f1};
+static constexpr FloatRegister ft2{FloatRegisters::f2};
+static constexpr FloatRegister ft3{FloatRegisters::f3};
+static constexpr FloatRegister ft4{FloatRegisters::f4};
+static constexpr FloatRegister ft5{FloatRegisters::f5};
+static constexpr FloatRegister ft6{FloatRegisters::f6};
+static constexpr FloatRegister ft7{FloatRegisters::f7};
+static constexpr FloatRegister fs0{FloatRegisters::f8};
+static constexpr FloatRegister fs1{FloatRegisters::f9};
+static constexpr FloatRegister fa0{FloatRegisters::f10};
+static constexpr FloatRegister fa1{FloatRegisters::f11};
+static constexpr FloatRegister fa2{FloatRegisters::f12};
+static constexpr FloatRegister fa3{FloatRegisters::f13};
+static constexpr FloatRegister fa4{FloatRegisters::f14};
+static constexpr FloatRegister fa5{FloatRegisters::f15};
+static constexpr FloatRegister fa6{FloatRegisters::f16};
+static constexpr FloatRegister fa7{FloatRegisters::f17};
+static constexpr FloatRegister fs2{FloatRegisters::f18};
+static constexpr FloatRegister fs3{FloatRegisters::f19};
+static constexpr FloatRegister fs4{FloatRegisters::f20};
+static constexpr FloatRegister fs5{FloatRegisters::f21};
+static constexpr FloatRegister fs6{FloatRegisters::f22};
+static constexpr FloatRegister fs7{FloatRegisters::f23};
+static constexpr FloatRegister fs8{FloatRegisters::f24};
+static constexpr FloatRegister fs9{FloatRegisters::f25};
+static constexpr FloatRegister fs10{FloatRegisters::f26};
+static constexpr FloatRegister fs11{FloatRegisters::f27};
+static constexpr FloatRegister ft8{FloatRegisters::f28};
+static constexpr FloatRegister ft9{FloatRegisters::f29};
+static constexpr FloatRegister ft10{FloatRegisters::f30};
+static constexpr FloatRegister ft11{FloatRegisters::f31};
+
+static constexpr Register StackPointer{Registers::sp};
+static constexpr Register FramePointer{Registers::fp};
+static constexpr Register ReturnReg{Registers::a0};
+static constexpr Register ScratchRegister{Registers::s11};
+static constexpr Register64 ReturnReg64(ReturnReg);
+
+static constexpr FloatRegister ReturnFloat32Reg{FloatRegisters::fa0};
+static constexpr FloatRegister ReturnDoubleReg{FloatRegisters::fa0};
+#ifdef ENABLE_WASM_SIMD
+static constexpr FloatRegister ReturnSimd128Reg{FloatRegisters::invalid_reg};
+static constexpr FloatRegister ScratchSimd128Reg{FloatRegisters::invalid_reg};
+#endif
+static constexpr FloatRegister InvalidFloatReg{};
+
+static constexpr FloatRegister ScratchFloat32Reg{FloatRegisters::ft10};
+static constexpr FloatRegister ScratchDoubleReg{FloatRegisters::ft10};
+static constexpr FloatRegister ScratchDoubleReg2{FloatRegisters::fs11};
+
+static constexpr Register OsrFrameReg{Registers::a3};
+static constexpr Register PreBarrierReg{Registers::a1};
+static constexpr Register InterpreterPCReg{Registers::t0};
+static constexpr Register CallTempReg0{Registers::t0};
+static constexpr Register CallTempReg1{Registers::t1};
+static constexpr Register CallTempReg2{Registers::t2};
+static constexpr Register CallTempReg3{Registers::t3};
+static constexpr Register CallTempReg4{Registers::a6};
+static constexpr Register CallTempReg5{Registers::a7};
+static constexpr Register InvalidReg{Registers::invalid_reg};
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr Register IntArgReg0{Registers::a0};
+static constexpr Register IntArgReg1{Registers::a1};
+static constexpr Register IntArgReg2{Registers::a2};
+static constexpr Register IntArgReg3{Registers::a3};
+static constexpr Register IntArgReg4{Registers::a4};
+static constexpr Register IntArgReg5{Registers::a5};
+static constexpr Register IntArgReg6{Registers::a6};
+static constexpr Register IntArgReg7{Registers::a7};
+static constexpr Register HeapReg{Registers::s7};
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr Register JSReturnReg_Type{Registers::a3};
+static constexpr Register JSReturnReg_Data{Registers::s2};
+static constexpr Register JSReturnReg{Registers::a2};
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0{Registers::t0};
+static constexpr Register ABINonArgReg1{Registers::t1};
+static constexpr Register ABINonArgReg2{Registers::t2};
+static constexpr Register ABINonArgReg3{Registers::t3};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0{Registers::t0};
+static constexpr Register ABINonArgReturnReg1{Registers::t1};
+static constexpr Register ABINonVolatileReg{Registers::s1};
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call which must preserve both ABI argument, return
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg{Registers::t0};
+
+// This register may be volatile or nonvolatile.
+// NOTE(review): ScratchFloat32Reg/ScratchDoubleReg above are ft10, yet this comment said to avoid ft11 — confirm which FP register is actually reserved as scratch.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::ft11};
+
+static constexpr Register WasmTableCallScratchReg0{ABINonArgReg0};
+static constexpr Register WasmTableCallScratchReg1{ABINonArgReg1};
+static constexpr Register WasmTableCallSigReg{ABINonArgReg2};
+static constexpr Register WasmTableCallIndexReg{ABINonArgReg3};
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions. Must be nonvolatile.
+static constexpr Register InstanceReg{Registers::s4};
+
+static constexpr Register WasmJitEntryReturnScratch{Registers::t1};
+
+static constexpr Register WasmCallRefCallScratchReg0{ABINonArgReg0};
+static constexpr Register WasmCallRefCallScratchReg1{ABINonArgReg1};
+static constexpr Register WasmCallRefReg{ABINonArgReg3};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_Register_riscv64_h
diff --git a/js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h b/js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h
new file mode 100644
index 0000000000..bd8667c5ec
--- /dev/null
+++ b/js/src/jit/riscv64/SharedICHelpers-riscv64-inl.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_SharedICHelpers_riscv64_inl_h
+#define jit_riscv64_SharedICHelpers_riscv64_inl_h
+
+#include "jit/SharedICHelpers.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.subPtr(Imm32(argSize), scratch);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+ masm.addPtr(Imm32(argSize), scratch);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (ra) already contains the return address (as we
+ // keep it there through the stub calls), but the VMWrapper code being
+ // called expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(ra);
+
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.movePtr(StackPointer, FramePointer);
+ masm.Push(ICStubReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_SharedICHelpers_riscv64_inl_h */
diff --git a/js/src/jit/riscv64/SharedICHelpers-riscv64.h b/js/src/jit/riscv64/SharedICHelpers-riscv64.h
new file mode 100644
index 0000000000..3411c6727e
--- /dev/null
+++ b/js/src/jit/riscv64/SharedICHelpers-riscv64.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_SharedICHelpers_riscv64_h
+#define jit_riscv64_SharedICHelpers_riscv64_h
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+namespace js {
+namespace jit {
+
+static const size_t ICStackValueOffset = 0;
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ // No-op on RISC-V because ra register is always holding the return address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ // No-op on RISC-V because ra register is always holding the return address.
+}
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.branch(ra); }
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ masm.loadPtr(
+ Address(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP),
+ ICStubReg);
+
+ masm.movePtr(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ {
+ UseScratchRegisterScope temps(&masm);
+ Register scratch2 = temps.Acquire();
+ masm.Pop(scratch2);
+ }
+
+ masm.checkStackAlignment();
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On RISC-V, $ra is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_SharedICHelpers_riscv64_h */
diff --git a/js/src/jit/riscv64/SharedICRegisters-riscv64.h b/js/src/jit/riscv64/SharedICRegisters-riscv64.h
new file mode 100644
index 0000000000..3dcefe51c7
--- /dev/null
+++ b/js/src/jit/riscv64/SharedICRegisters-riscv64.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_SharedICRegisters_riscv64_h
+#define jit_riscv64_SharedICRegisters_riscv64_h
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/riscv64/MacroAssembler-riscv64.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value
+// should be preserved across calls.
+static constexpr ValueOperand R0(a2);
+static constexpr ValueOperand R1(s1);
+static constexpr ValueOperand R2(a0);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t0;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = fa0;
+static constexpr FloatRegister FloatReg1 = fa1;
+static constexpr FloatRegister FloatReg2 = fa2;
+static constexpr FloatRegister FloatReg3 = fa3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_riscv64_SharedICRegisters_riscv64_h */
diff --git a/js/src/jit/riscv64/Simulator-riscv64.cpp b/js/src/jit/riscv64/Simulator-riscv64.cpp
new file mode 100644
index 0000000000..02a668f185
--- /dev/null
+++ b/js/src/jit/riscv64/Simulator-riscv64.cpp
@@ -0,0 +1,4718 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef JS_SIMULATOR_RISCV64
+# include "jit/riscv64/Simulator-riscv64.h"
+
+# include "mozilla/Casting.h"
+# include "mozilla/FloatingPoint.h"
+# include "mozilla/IntegerPrintfMacros.h"
+# include "mozilla/Likely.h"
+# include "mozilla/MathAlgorithms.h"
+
+# include <float.h>
+# include <iostream>
+# include <limits>
+
+# include "jit/AtomicOperations.h"
+# include "jit/riscv64/Assembler-riscv64.h"
+# include "js/Conversions.h"
+# include "js/UniquePtr.h"
+# include "js/Utility.h"
+# include "threading/LockGuard.h"
+# include "vm/JSContext.h"
+# include "vm/Runtime.h"
+# include "wasm/WasmInstance.h"
+# include "wasm/WasmSignalHandlers.h"
+
+# define I8(v) static_cast<int8_t>(v)
+# define I16(v) static_cast<int16_t>(v)
+# define U16(v) static_cast<uint16_t>(v)
+# define I32(v) static_cast<int32_t>(v)
+# define U32(v) static_cast<uint32_t>(v)
+# define I64(v) static_cast<int64_t>(v)
+# define U64(v) static_cast<uint64_t>(v)
+# define I128(v) static_cast<__int128_t>(v)
+# define U128(v) static_cast<__uint128_t>(v)
+
+# define REGIx_FORMAT PRIx64
+# define REGId_FORMAT PRId64
+
+# define I32_CHECK(v) \
+ ({ \
+ MOZ_ASSERT(I64(I32(v)) == I64(v)); \
+ I32((v)); \
+ })
+
+namespace js {
+namespace jit {
+
+bool Simulator::FLAG_trace_sim = false;
+bool Simulator::FLAG_debug_sim = false;
+bool Simulator::FLAG_riscv_trap_to_simulator_debugger = false;
+bool Simulator::FLAG_riscv_print_watchpoint = false;
+
+static void UNIMPLEMENTED() {
+ printf("UNIMPLEMENTED instruction.\n");
+ MOZ_CRASH();
+}
+static void UNREACHABLE() {
+ printf("UNREACHABLE instruction.\n");
+ MOZ_CRASH();
+}
+# define UNSUPPORTED() \
+ std::cout << "Unrecognized instruction [@pc=0x" << std::hex \
+ << registers_[pc] << "]: 0x" << instr_.InstructionBits() \
+ << std::endl; \
+ printf("Unsupported instruction.\n"); \
+ MOZ_CRASH();
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+// -----------------------------------------------------------------------------
+// Various RISC-V assembly constants.
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 0;
+const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t);
+const int kBranchReturnOffset = 2 * kInstrSize;
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int64_t Simulator::StopSimAt = -1;
+
+static bool IsFlag(const char* found, const char* flag) {
+ return strlen(found) == strlen(flag) && strcmp(found, flag) == 0;
+}
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ int64_t stopAt;
+ char* stopAtStr = getenv("RISCV_SIM_STOP_AT");
+ if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+ char* str = getenv("RISCV_TRACE_SIM");
+ if (str != nullptr && IsFlag(str, "true")) {
+ FLAG_trace_sim = true;
+ }
+
+ return sim.release();
+}
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+# if JS_CODEGEN_RISCV64
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+ if (FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int64:%" REGId_FORMAT
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%016" REGIx_FORMAT " (%" PRId64 ") flt:%e",
+ v.fmt_int64, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" REGIx_FORMAT " (%" PRId64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+# elif JS_CODEGEN_RISCV32
+template <typename T>
+void Simulator::TraceRegWr(T value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int32_t fmt_int32;
+ float fmt_float;
+ double fmt_double;
+ } v;
+ if (t != DOUBLE) {
+ v.fmt_int32 = value;
+ } else {
+ DCHECK_EQ(sizeof(T), 8);
+ v.fmt_double = value;
+ }
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_,
+ "%016" REGIx_FORMAT " (%" PRId64 ") int32:%" REGId_FORMAT
+ " uint32:%" PRIu32,
+ v.fmt_int32, icount_, v.fmt_int32, v.fmt_int32);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%016" REGIx_FORMAT " (%" PRId64 ") flt:%e",
+ v.fmt_int32, icount_, v.fmt_float);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
+ static_cast<int64_t>(v.fmt_double), icount_, v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+# endif
+// The RiscvDebugger class is used by the simulator while debugging simulated
+// code.
+class RiscvDebugger {
+ public:
+ explicit RiscvDebugger(Simulator* sim) : sim_(sim) {}
+
+ void Debug();
+ // Print all registers with a nice formatting.
+ void PrintRegs(char name_prefix, int start_index, int end_index);
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ static const Instr kNopInstr = 0x0;
+
+ private:
+ Simulator* sim_;
+
+ int64_t GetRegisterValue(int regnum);
+ int64_t GetFPURegisterValue(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ __int128_t GetVRegisterValue(int regnum);
+# endif
+ bool GetValue(const char* desc, int64_t* value);
+};
+
+int64_t RiscvDebugger::GetRegisterValue(int regnum) {
+ if (regnum == Simulator::Register::kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getRegister(regnum);
+ }
+}
+
+int64_t RiscvDebugger::GetFPURegisterValue(int regnum) {
+ if (regnum == Simulator::FPURegister::kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getFpuRegister(regnum);
+ }
+}
+
+float RiscvDebugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == Simulator::FPURegister::kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getFpuRegisterFloat(regnum);
+ }
+}
+
+double RiscvDebugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == Simulator::FPURegister::kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->getFpuRegisterDouble(regnum);
+ }
+}
+
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+__int128_t RiscvDebugger::GetVRegisterValue(int regnum) {
+ if (regnum == kNumVRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_vregister(regnum);
+ }
+}
+# endif
+
+bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::FromName(desc);
+ int fpuregnum = FloatRegisters::FromName(desc);
+
+ if (regnum != Registers::invalid_reg) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != FloatRegisters::invalid_reg) {
+ *value = GetFPURegisterValue(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc + 2, "%" SCNx64, reinterpret_cast<int64_t*>(value)) == 1;
+ } else {
+ return sscanf(desc, "%" SCNu64, reinterpret_cast<int64_t*>(value)) == 1;
+ }
+}
+
+# define REG_INFO(name) \
+ name, GetRegisterValue(Registers::FromName(name)), \
+ GetRegisterValue(Registers::FromName(name))
+
+void RiscvDebugger::PrintRegs(char name_prefix, int start_index,
+ int end_index) {
+ EmbeddedVector<char, 10> name1, name2;
+ MOZ_ASSERT(name_prefix == 'a' || name_prefix == 't' || name_prefix == 's');
+ MOZ_ASSERT(start_index >= 0 && end_index <= 99);
+ int num_registers = (end_index - start_index) + 1;
+ for (int i = 0; i < num_registers / 2; i++) {
+ SNPrintF(name1, "%c%d", name_prefix, start_index + 2 * i);
+ SNPrintF(name2, "%c%d", name_prefix, start_index + 2 * i + 1);
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ " \t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT " \n",
+ REG_INFO(name1.start()), REG_INFO(name2.start()));
+ }
+ if (num_registers % 2 == 1) {
+ SNPrintF(name1, "%c%d", name_prefix, end_index);
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT " \n",
+ REG_INFO(name1.start()));
+ }
+}
+
+void RiscvDebugger::printAllRegs() {
+ printf("\n");
+ // ra, sp, gp
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT "\n",
+ REG_INFO("ra"), REG_INFO("sp"), REG_INFO("gp"));
+
+ // tp, fp, pc
+ printf("%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+ "\t%3s: 0x%016" REGIx_FORMAT " %14" REGId_FORMAT "\n",
+ REG_INFO("tp"), REG_INFO("fp"), REG_INFO("pc"));
+
+ // print register a0, .., a7
+ PrintRegs('a', 0, 7);
+ // print registers s1, ..., s11
+ PrintRegs('s', 1, 11);
+ // print registers t0, ..., t6
+ PrintRegs('t', 0, 6);
+}
+
+# undef REG_INFO
+
+void RiscvDebugger::printAllRegsIncludingFPU() {
+# define FPU_REG_INFO(n) \
+ FloatRegisters::GetName(n), GetFPURegisterValue(n), \
+ GetFPURegisterValueDouble(n)
+
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ MOZ_ASSERT(kNumFPURegisters % 2 == 0);
+ for (int i = 0; i < kNumFPURegisters; i += 2)
+ printf("%3s: 0x%016" PRIx64 " %16.4e \t%3s: 0x%016" PRIx64 " %16.4e\n",
+ FPU_REG_INFO(i), FPU_REG_INFO(i + 1));
+# undef FPU_REG_INFO
+}
+
+void RiscvDebugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+# define COMMAND_SIZE 63
+# define ARG_SIZE 255
+
+# define STR(a) #a
+# define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ printf(" 0x%016" REGIx_FORMAT " %s\n", sim_->get_pc(), buffer.start());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+        // Ownership of the line buffer is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(
+ line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
+ sim_->icount_++;
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ int64_t fvalue;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ int regnum = Registers::FromName(arg1);
+ int fpuregnum = FloatRegisters::FromName(arg1);
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ int vregnum = VRegisters::FromName(arg1);
+# endif
+ if (regnum != Registers::invalid_reg) {
+ value = GetRegisterValue(regnum);
+ printf("%s: 0x%08" REGIx_FORMAT " %" REGId_FORMAT " \n", arg1,
+ value, value);
+ } else if (fpuregnum != FloatRegisters::invalid_reg) {
+ fvalue = GetFPURegisterValue(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ printf("%3s: 0x%016" PRIx64 " %16.4e\n",
+ FloatRegisters::GetName(fpuregnum), fvalue, dvalue);
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+ } else if (vregnum != kInvalidVRegister) {
+ __int128_t v = GetVRegisterValue(vregnum);
+ printf("\t%s:0x%016" REGIx_FORMAT "%016" REGIx_FORMAT "\n",
+ VRegisters::GetName(vregnum), (uint64_t)(v >> 64),
+ (uint64_t)v);
+# endif
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int64_t value;
+ float fvalue;
+ int fpuregnum = FloatRegisters::FromName(arg1);
+
+ if (fpuregnum != FloatRegisters::invalid_reg) {
+ value = GetFPURegisterValue(fpuregnum);
+ value &= 0xFFFFFFFFUL;
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ printf("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("print <fpu register> single\n");
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ UNIMPLEMENTED();
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+ if (argc < 2) {
+ printf("Need to specify <address> to memhex command\n");
+ continue;
+ }
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" 0x%012" PRIxPTR " : 0x%016" REGIx_FORMAT
+ " %14" REGId_FORMAT " ",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+ } else if ((strcmp(cmd, "watch") == 0)) {
+ if (argc < 2) {
+ printf("Need to specify <address> to mem command\n");
+ continue;
+ }
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ sim_->watch_address_ = reinterpret_cast<int64_t*>(value);
+ sim_->watch_value_ = *(sim_->watch_address_);
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ auto regnum = Registers::FromName(arg1);
+ if (regnum != Registers::invalid_reg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ sreg_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ sreg_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * kInstrSize);
+ }
+ }
+ } else {
+ sreg_t value1;
+ sreg_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ printf(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.start());
+ cur += kInstrSize;
+ }
+ } else if (strcmp(cmd, "trace") == 0) {
+ Simulator::FLAG_trace_sim = true;
+ Simulator::FLAG_riscv_print_watchpoint = true;
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0 ||
+ strcmp(cmd, "tbreak") == 0) {
+ bool is_tbreak = strcmp(cmd, "tbreak") == 0;
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ sim_->SetBreakpoint(reinterpret_cast<SimInstruction*>(value),
+ is_tbreak);
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ sim_->ListBreakpoints();
+ printf("Use `break <address>` to set or disable a breakpoint\n");
+ printf(
+ "Use `tbreak <address>` to set or disable a temporary "
+ "breakpoint\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on RISC-V !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ UNIMPLEMENTED();
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont (alias 'c')\n");
+ printf(" Continue execution\n");
+ printf("stepi (alias 'si')\n");
+ printf(" Step one instruction\n");
+ printf("print (alias 'p')\n");
+ printf(" print <register>\n");
+ printf(" Print register content\n");
+ printf(" Use register name 'all' to print all GPRs\n");
+ printf(" Use register name 'allf' to print all GPRs and FPRs\n");
+ printf("printobject (alias 'po')\n");
+ printf(" printobject <register>\n");
+ printf(" Print an object from a register\n");
+ printf("stack\n");
+ printf(" stack [<words>]\n");
+ printf(" Dump stack content, default dump 10 words)\n");
+ printf("mem\n");
+ printf(" mem <address> [<words>]\n");
+ printf(" Dump memory content, default dump 10 words)\n");
+ printf("watch\n");
+ printf(" watch <address> \n");
+ printf(" watch memory content.)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm (alias 'di')\n");
+ printf(" disasm [<instructions>]\n");
+ printf(" disasm [<address/register>] (e.g., disasm pc) \n");
+ printf(" disasm [[<address/register>] <instructions>]\n");
+ printf(" Disassemble code, default is 10 instructions\n");
+ printf(" from pc\n");
+ printf("gdb \n");
+ printf(" Return to gdb if the simulator was started with gdb\n");
+ printf("break (alias 'b')\n");
+ printf(" break : list all breakpoints\n");
+ printf(" break <address> : set / enable / disable a breakpoint.\n");
+ printf("tbreak\n");
+ printf(" tbreak : list all breakpoints\n");
+ printf(
+ " tbreak <address> : set / enable / disable a temporary "
+ "breakpoint.\n");
+ printf(" Set a breakpoint enabled only for one stop. \n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+# undef COMMAND_SIZE
+# undef ARG_SIZE
+
+# undef STR
+# undef XSTR
+}
+
+void Simulator::SetBreakpoint(SimInstruction* location, bool is_tbreak) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ if (breakpoints_.at(i).is_tbreak != is_tbreak) {
+ printf("Change breakpoint at %p to %s breakpoint\n",
+ reinterpret_cast<void*>(location),
+ is_tbreak ? "temporary" : "regular");
+ breakpoints_.at(i).is_tbreak = is_tbreak;
+ return;
+ }
+ printf("Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true, is_tbreak};
+ breakpoints_.push_back(new_breakpoint);
+ printf("Set a %sbreakpoint at %p\n", is_tbreak ? "temporary " : "",
+ reinterpret_cast<void*>(location));
+}
+
+void Simulator::ListBreakpoints() {
+ printf("Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ printf("%p : %s %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled",
+ breakpoints_.at(i).is_tbreak ? ": temporary" : "");
+ }
+}
+
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ bool is_tbreak = false;
+ SimInstruction* pc_ = reinterpret_cast<SimInstruction*>(get_pc());
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) && breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ if (breakpoints_.at(i).is_tbreak) {
+ // Disable a temporary breakpoint.
+ is_tbreak = true;
+ breakpoints_.at(i).enabled = false;
+ }
+ break;
+ }
+ }
+ if (hit_a_breakpoint) {
+ printf("Hit %sa breakpoint at %p.\n", is_tbreak ? "and disabled " : "",
+ reinterpret_cast<void*>(pc_));
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
// Take ownership of |input| as the debugger's remembered last command,
// releasing any previously stored line. |input| must be js_malloc-allocated
// (it is freed with js_free) and may be null.
void Simulator::setLastDebuggerInput(char* input) {
  js_free(lastDebuggerInput_);
  lastDebuggerInput_ = input;
}
+
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
// Flush from start up to and not including start + size.
// Invalidates every cache line of [start, start + size) in the simulated
// i-cache. |start| and |size| must be cache-line aligned and the range must
// stay within a single page; FlushICacheLocked handles page splitting.
static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
                               intptr_t start, int size) {
  MOZ_ASSERT(size <= CachePage::kPageSize);
  MOZ_ASSERT(AllOnOnePage(start, size - 1));
  MOZ_ASSERT((start & CachePage::kLineMask) == 0);
  MOZ_ASSERT((size & CachePage::kLineMask) == 0);
  // Locate the page record and the offset of the first affected line.
  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
  int offset = (start & CachePage::kPageMask);
  CachePage* cache_page = GetCachePageLocked(i_cache, page);
  char* valid_bytemap = cache_page->validityByte(offset);
  // One validity byte per cache line; invalidate the whole run at once.
  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
+
// Invalidate the simulated i-cache for [start_addr, start_addr + size),
// widening the range outward to cache-line granularity, then walking it one
// page at a time because FlushOnePageLocked cannot cross page boundaries.
static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
                              void* start_addr, size_t size) {
  intptr_t start = reinterpret_cast<intptr_t>(start_addr);
  // Round |start| down to its line start and grow |size| to compensate.
  int intra_line = (start & CachePage::kLineMask);
  start -= intra_line;
  size += intra_line;
  // Round |size| up to a whole number of cache lines.
  size = ((size - 1) | CachePage::kLineMask) + 1;
  int offset = (start & CachePage::kPageMask);
  // Peel off whole pages while the range still spans a page boundary.
  while (!AllOnOnePage(start, size - 1)) {
    int bytes_to_flush = CachePage::kPageSize - offset;
    FlushOnePageLocked(i_cache, start, bytes_to_flush);
    start += bytes_to_flush;
    size -= bytes_to_flush;
    MOZ_ASSERT((start & CachePage::kPageMask) == 0);
    offset = 0;
  }
  // Flush whatever remains inside the final page.
  if (size != 0) {
    FlushOnePageLocked(i_cache, start, size);
  }
}
+
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ int cmpret = memcmp(reinterpret_cast<void*>(instr),
+ cache_page->cachedData(offset), kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
// Hash a page base address for the i-cache map. The low two bits are shifted
// out (instructions are word-aligned) to spread entries across buckets.
HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
  return U32(reinterpret_cast<uintptr_t>(l)) >> 2;
}
+
// Keys are page base addresses; equality is plain pointer identity. Both
// sides must be page-aligned by construction.
bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
  MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
  MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
  return k == l;
}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
Simulator::Simulator() {
  // Set up simulator support first. Some of this information is needed to
  // setup the architecture state.

  // Note, allocation and anything that depends on allocated memory is
  // deferred until init(), in order to handle OOM properly.

  stack_ = nullptr;
  stackLimit_ = 0;
  pc_modified_ = false;
  icount_ = 0;
  break_count_ = 0;
  break_pc_ = nullptr;
  break_instr_ = 0;
  single_stepping_ = false;
  single_step_callback_ = nullptr;
  single_step_callback_arg_ = nullptr;

  // Set up architecture state.
  // All registers are initialized to zero to start with.
  for (int i = 0; i < Simulator::Register::kNumSimuRegisters; i++) {
    registers_[i] = 0;
  }
  for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
    FPUregisters_[i] = 0;
  }
  // FPU control/status and LL/SC (load-reserved/store-conditional) tracking
  // state start out cleared.
  FCSR_ = 0;
  LLBit_ = false;
  LLAddr_ = 0;
  lastLLValue_ = 0;

  // The ra and pc are initialized to a known bad value that will cause an
  // access violation if the simulator ever tries to execute it.
  registers_[pc] = bad_ra;
  registers_[ra] = bad_ra;

  // Per-code stop counters used by the debugger's "stop" feature.
  for (int i = 0; i < kNumExceptions; i++) {
    exceptions[i] = 0;
  }

  lastDebuggerInput_ = nullptr;
}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
// execute it with the simulator. We do that by redirecting the external
// reference to a swi (software-interrupt) instruction that is handled by
// the simulator. We write the original destination of the jump just at a known
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
  friend class SimulatorProcess;

  // sim's lock must already be held.
  // Links the new entry at the head of the process-wide redirection list and
  // invalidates the i-cache line holding the embedded swi instruction.
  Redirection(void* nativeFunction, ABIFunctionType type)
      : nativeFunction_(nativeFunction),
        swiInstruction_(rtCallRedirInstr),
        type_(type),
        next_(nullptr) {
    next_ = SimulatorProcess::redirection();
    if (!SimulatorProcess::ICacheCheckingDisableCount) {
      FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
                        kInstrSize);
    }
    SimulatorProcess::setRedirection(this);
  }

 public:
  // Address generated code jumps to in place of the native function.
  void* addressOfSwiInstruction() { return &swiInstruction_; }
  void* nativeFunction() const { return nativeFunction_; }
  ABIFunctionType type() const { return type_; }

  // Find the redirection for |nativeFunction|, creating it on first use.
  static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
    AutoLockSimulatorCache als;

    Redirection* current = SimulatorProcess::redirection();
    for (; current != nullptr; current = current->next_) {
      if (current->nativeFunction_ == nativeFunction) {
        MOZ_ASSERT(current->type() == type);
        return current;
      }
    }

    // Note: we can't use js_new here because the constructor is private.
    AutoEnterOOMUnsafeRegion oomUnsafe;
    Redirection* redir = js_pod_malloc<Redirection>(1);
    if (!redir) {
      oomUnsafe.crash("Simulator redirection");
    }
    new (redir) Redirection(nativeFunction, type);
    return redir;
  }

  // Recover the owning Redirection from the address of its embedded swi
  // instruction, using the member's fixed offset within the object.
  static Redirection* FromSwiInstruction(Instruction* swiInstruction) {
    uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
    uint8_t* addrOfRedirection =
        addrOfSwi - offsetof(Redirection, swiInstruction_);
    return reinterpret_cast<Redirection*>(addrOfRedirection);
  }

 private:
  void* nativeFunction_;     // Host function to invoke.
  uint32_t swiInstruction_;  // The instruction generated code executes.
  ABIFunctionType type_;     // ABI signature used to marshal arguments.
  Redirection* next_;        // Intrusive list; head in SimulatorProcess.
};
+
// Release the simulated stack allocated in init().
Simulator::~Simulator() { js_free(stack_); }
+
// Process-wide simulator state; i-cache checking is opt-in via environment.
// NOTE(review): the environment variable still carries the MIPS name,
// presumably inherited from the port this simulator derives from — confirm
// whether a RISCV_* name is intended.
SimulatorProcess::SimulatorProcess()
    : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
  if (getenv("MIPS_SIM_ICACHE_CHECKS")) {
    ICacheCheckingDisableCount = 0;
  }
}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
/* static */
// Return the address generated code should call in place of
// |nativeFunction|: the swi instruction of its (possibly newly created)
// Redirection entry.
void* Simulator::RedirectNativeFunction(void* nativeFunction,
                                        ABIFunctionType type) {
  Redirection* redirection = Redirection::Get(nativeFunction, type);
  return redirection->addressOfSwiInstruction();
}
+
// Get the active Simulator for the current thread. The simulator instance is
// owned by the thread's JSContext.
Simulator* Simulator::Current() {
  JSContext* cx = TlsContext.get();
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
  return cx->simulator();
}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int64_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Simulator::Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
// Store the raw 64-bit pattern of an FPU register.
void Simulator::setFpuRegister(int fpureg, int64_t value) {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  FPUregisters_[fpureg] = value;
}

// Overwrite only the low 32 bits of an FPU register.
void Simulator::setFpuRegisterLo(int fpureg, int32_t value) {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]) = value;
}

// Overwrite only the high 32 bits of an FPU register.
void Simulator::setFpuRegisterHi(int fpureg, int32_t value) {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1) = value;
}

// Store a single-precision value NaN-boxed into the 64-bit register, as the
// RISC-V F extension requires for narrower values.
void Simulator::setFpuRegisterFloat(int fpureg, float value) {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  *mozilla::BitwiseCast<int64_t*>(&FPUregisters_[fpureg]) = box_float(value);
}

// Same as above, but from an exact Float32 bit pattern.
void Simulator::setFpuRegisterFloat(int fpureg, Float32 value) {
  MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
  Float64 t = Float64::FromBits(box_float(value.get_bits()));
  memcpy(&FPUregisters_[fpureg], &t, 8);
}

// Store a double-precision value (occupies the full 64 bits).
void Simulator::setFpuRegisterDouble(int fpureg, double value) {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
}

// Same as above, but from an exact Float64 bit pattern.
void Simulator::setFpuRegisterDouble(int fpureg, Float64 value) {
  MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
  memcpy(&FPUregisters_[fpureg], &value, 8);
}
+
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
int64_t Simulator::getRegister(int reg) const {
  MOZ_ASSERT((reg >= 0) && (reg < Simulator::Register::kNumSimuRegisters));
  if (reg == 0) {
    return 0;
  }
  return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
}

// Raw 64-bit contents of an FPU register.
int64_t Simulator::getFpuRegister(int fpureg) const {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  return FPUregisters_[fpureg];
}

// Low 32 bits of an FPU register.
int32_t Simulator::getFpuRegisterLo(int fpureg) const {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
}

// High 32 bits of an FPU register.
int32_t Simulator::getFpuRegisterHi(int fpureg) const {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1);
}

// Reinterpret the low 32 bits as a float, without checking the NaN box.
float Simulator::getFpuRegisterFloat(int fpureg) const {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]);
}

// Read a single-precision value, honoring NaN boxing: an improperly boxed
// register reads as the canonical quiet NaN (0x7ffc0000).
Float32 Simulator::getFpuRegisterFloat32(int fpureg) const {
  MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
  if (!is_boxed_float(FPUregisters_[fpureg])) {
    return Float32::FromBits(0x7ffc0000);
  }
  return Float32::FromBits(
      *bit_cast<uint32_t*>(const_cast<int64_t*>(&FPUregisters_[fpureg])));
}

// Reinterpret the full 64 bits as a double.
double Simulator::getFpuRegisterDouble(int fpureg) const {
  MOZ_ASSERT((fpureg >= 0) &&
             (fpureg < Simulator::FPURegister::kNumFPURegisters));
  return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]);
}

// Exact Float64 bit pattern of the register.
Float64 Simulator::getFpuRegisterFloat64(int fpureg) const {
  MOZ_ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
  return Float64::FromBits(FPUregisters_[fpureg]);
}
+
// Place a host-call return value in the ABI result registers: fa0 for
// floating-point results, a0 (and a1 for 128-bit) for integer results.
void Simulator::setCallResultDouble(double result) {
  setFpuRegisterDouble(fa0, result);
}

void Simulator::setCallResultFloat(float result) {
  setFpuRegisterFloat(fa0, result);
}

void Simulator::setCallResult(int64_t res) { setRegister(a0, res); }

// 128-bit results are split across the a0/a1 register pair (low/high).
void Simulator::setCallResult(__int128_t res) {
  setRegister(a0, I64(res));
  setRegister(a1, I64(res >> 64));
}
+
// Raw access to the PC register.
void Simulator::set_pc(int64_t value) {
  pc_modified_ = true;
  registers_[pc] = value;
}

// True once the pc holds one of the sentinel values (bad_ra / end_sim_pc)
// that mark the end of simulated execution.
bool Simulator::has_bad_pc() const {
  return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
}

// Raw access to the PC register without the special adjustment when reading.
int64_t Simulator::get_pc() const { return registers_[pc]; }

// Snapshot pc/fp/sp/ra for the profiler's frame iterator.
JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
  wasm::RegisterState state;
  state.pc = (void*)get_pc();
  state.fp = (void*)getRegister(fp);
  state.sp = (void*)getRegister(sp);
  state.lr = (void*)getRegister(ra);
  return state;
}
+
// TODO(plind): consider making icount_ printing a flag option.
// Format a trace line for a memory read: destination register value,
// instruction count, the loaded value in signed/unsigned (or float) form,
// and the source address. Only active under FLAG_trace_sim.
template <typename T>
void Simulator::TraceMemRd(sreg_t addr, T value, sreg_t reg_value) {
  if (FLAG_trace_sim) {
    if (std::is_integral<T>::value) {
      // Width is dispatched on sizeof(T) so one template covers all integers.
      switch (sizeof(T)) {
        case 1:
          SNPrintF(trace_buf_,
                   "%016" REGIx_FORMAT "    (%" PRId64 ")    int8:%" PRId8
                   " uint8:%" PRIu8 " <-- [addr: %" REGIx_FORMAT "]",
                   reg_value, icount_, static_cast<int8_t>(value),
                   static_cast<uint8_t>(value), addr);
          break;
        case 2:
          SNPrintF(trace_buf_,
                   "%016" REGIx_FORMAT "    (%" PRId64 ")    int16:%" PRId16
                   " uint16:%" PRIu16 " <-- [addr: %" REGIx_FORMAT "]",
                   reg_value, icount_, static_cast<int16_t>(value),
                   static_cast<uint16_t>(value), addr);
          break;
        case 4:
          SNPrintF(trace_buf_,
                   "%016" REGIx_FORMAT "    (%" PRId64 ")    int32:%" PRId32
                   " uint32:%" PRIu32 " <-- [addr: %" REGIx_FORMAT "]",
                   reg_value, icount_, static_cast<int32_t>(value),
                   static_cast<uint32_t>(value), addr);
          break;
        case 8:
          SNPrintF(trace_buf_,
                   "%016" REGIx_FORMAT "    (%" PRId64 ")    int64:%" PRId64
                   " uint64:%" PRIu64 " <-- [addr: %" REGIx_FORMAT "]",
                   reg_value, icount_, static_cast<int64_t>(value),
                   static_cast<uint64_t>(value), addr);
          break;
        default:
          UNREACHABLE();
      }
    } else if (std::is_same<float, T>::value) {
      SNPrintF(trace_buf_,
               "%016" REGIx_FORMAT "    (%" PRId64
               ")    flt:%e <-- [addr: %" REGIx_FORMAT "]",
               reg_value, icount_, static_cast<float>(value), addr);
    } else if (std::is_same<double, T>::value) {
      SNPrintF(trace_buf_,
               "%016" REGIx_FORMAT "    (%" PRId64
               ")    dbl:%e <-- [addr: %" REGIx_FORMAT "]",
               reg_value, icount_, static_cast<double>(value), addr);
    } else {
      UNREACHABLE();
    }
  }
}
+
// Trace a single-precision FPU load (register bit pattern plus scalar view).
void Simulator::TraceMemRdFloat(sreg_t addr, Float32 value, int64_t reg_value) {
  if (FLAG_trace_sim) {
    SNPrintF(trace_buf_,
             "%016" PRIx64 "    (%" PRId64
             ")    flt:%e <-- [addr: %" REGIx_FORMAT "]",
             reg_value, icount_, static_cast<float>(value.get_scalar()), addr);
  }
}

// Trace a double-precision FPU load from a plain double.
void Simulator::TraceMemRdDouble(sreg_t addr, double value, int64_t reg_value) {
  if (FLAG_trace_sim) {
    SNPrintF(trace_buf_,
             "%016" PRIx64 "    (%" PRId64
             ")    dbl:%e <-- [addr: %" REGIx_FORMAT "]",
             reg_value, icount_, static_cast<double>(value), addr);
  }
}

// Trace a double-precision FPU load from an exact Float64 bit pattern.
void Simulator::TraceMemRdDouble(sreg_t addr, Float64 value,
                                 int64_t reg_value) {
  if (FLAG_trace_sim) {
    SNPrintF(trace_buf_,
             "%016" PRIx64 "    (%" PRId64
             ")    dbl:%e <-- [addr: %" REGIx_FORMAT "]",
             reg_value, icount_, static_cast<double>(value.get_scalar()), addr);
  }
}
+
// Format a trace line for a memory write: instruction count, the stored
// value in signed/unsigned (or float) form, and the destination address.
// Unlike reads there is no destination register column, hence the leading
// padding. Only active under FLAG_trace_sim.
template <typename T>
void Simulator::TraceMemWr(sreg_t addr, T value) {
  if (FLAG_trace_sim) {
    switch (sizeof(T)) {
      case 1:
        SNPrintF(trace_buf_,
                 "                    (%" PRIu64 ")    int8:%" PRId8
                 " uint8:%" PRIu8 " --> [addr: %" REGIx_FORMAT "]",
                 icount_, static_cast<int8_t>(value),
                 static_cast<uint8_t>(value), addr);
        break;
      case 2:
        SNPrintF(trace_buf_,
                 "                    (%" PRIu64 ")    int16:%" PRId16
                 " uint16:%" PRIu16 " --> [addr: %" REGIx_FORMAT "]",
                 icount_, static_cast<int16_t>(value),
                 static_cast<uint16_t>(value), addr);
        break;
      case 4:
        // 4- and 8-byte widths may be integers or floats; pick the format
        // at compile time.
        if (std::is_integral<T>::value) {
          SNPrintF(trace_buf_,
                   "                    (%" PRIu64 ")    int32:%" PRId32
                   " uint32:%" PRIu32 " --> [addr: %" REGIx_FORMAT "]",
                   icount_, static_cast<int32_t>(value),
                   static_cast<uint32_t>(value), addr);
        } else {
          SNPrintF(trace_buf_,
                   "                    (%" PRIu64
                   ")    flt:%e --> [addr: %" REGIx_FORMAT "]",
                   icount_, static_cast<float>(value), addr);
        }
        break;
      case 8:
        if (std::is_integral<T>::value) {
          SNPrintF(trace_buf_,
                   "                    (%" PRIu64 ")    int64:%" PRId64
                   " uint64:%" PRIu64 " --> [addr: %" REGIx_FORMAT "]",
                   icount_, static_cast<int64_t>(value),
                   static_cast<uint64_t>(value), addr);
        } else {
          SNPrintF(trace_buf_,
                   "                    (%" PRIu64
                   ")    dbl:%e --> [addr: %" REGIx_FORMAT "]",
                   icount_, static_cast<double>(value), addr);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
}
+
// Trace a double-precision store (dedicated overload used by WriteMem for
// double values).
void Simulator::TraceMemWrDouble(sreg_t addr, double value) {
  if (FLAG_trace_sim) {
    SNPrintF(trace_buf_,
             "                    (%" PRIu64
             ")    dbl:%e --> [addr: %" REGIx_FORMAT "]",
             icount_, value, addr);
  }
}
+
// Trace a load-reserved (LR) access; only the 32- and 64-bit integer widths
// defined by the RISC-V A extension are legal here.
template <typename T>
void Simulator::TraceLr(sreg_t addr, T value, sreg_t reg_value) {
  if (FLAG_trace_sim) {
    if (std::is_integral<T>::value) {
      switch (sizeof(T)) {
        case 4:
          SNPrintF(trace_buf_,
                   "%016" REGIx_FORMAT "    (%" PRId64 ")    int32:%" PRId32
                   " uint32:%" PRIu32 " <-- [addr: %" REGIx_FORMAT "]",
                   reg_value, icount_, static_cast<int32_t>(value),
                   static_cast<uint32_t>(value), addr);
          break;
        case 8:
          SNPrintF(trace_buf_,
                   "%016" REGIx_FORMAT "    (%" PRId64 ")    int64:%" PRId64
                   " uint64:%" PRIu64 " <-- [addr: %" REGIx_FORMAT "]",
                   reg_value, icount_, static_cast<int64_t>(value),
                   static_cast<uint64_t>(value), addr);
          break;
        default:
          UNREACHABLE();
      }
    } else {
      UNREACHABLE();
    }
  }
}
+
// Trace a store-conditional (SC) access: logs the success/failure flag that
// ended up in the destination register alongside the stored value.
template <typename T>
void Simulator::TraceSc(sreg_t addr, T value) {
  if (FLAG_trace_sim) {
    switch (sizeof(T)) {
      case 4:
        SNPrintF(trace_buf_,
                 "%016" REGIx_FORMAT "    (%" PRIu64 ")    int32:%" PRId32
                 " uint32:%" PRIu32 " --> [addr: %" REGIx_FORMAT "]",
                 getRegister(rd_reg()), icount_, static_cast<int32_t>(value),
                 static_cast<uint32_t>(value), addr);
        break;
      case 8:
        SNPrintF(trace_buf_,
                 "%016" REGIx_FORMAT "    (%" PRIu64 ")    int64:%" PRId64
                 " uint64:%" PRIu64 " --> [addr: %" REGIx_FORMAT "]",
                 getRegister(rd_reg()), icount_, static_cast<int64_t>(value),
                 static_cast<uint64_t>(value), addr);
        break;
      default:
        UNREACHABLE();
    }
  }
}
+
// TODO(RISCV): check whether the specific board supports unaligned load/store
// (determined by EEI). For now, we assume the board does not support unaligned
// load/store (e.g., trapping)
// Load a T from simulated memory. Faults inside wasm bounds-checked memory
// are absorbed by the wasm trap machinery and read as -1; accesses in the
// first 0x400 bytes are treated as null dereferences and drop into the
// debugger.
template <typename T>
T Simulator::ReadMem(sreg_t addr, Instruction* instr) {
  if (handleWasmSegFault(addr, sizeof(T))) {
    return -1;
  }
  if (addr >= 0 && addr < 0x400) {
    // This has to be a nullptr-dereference, drop into debugger.
    printf("Memory read from bad address: 0x%08" REGIx_FORMAT
           " , pc=0x%08" PRIxPTR " \n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
  T* ptr = reinterpret_cast<T*>(addr);
  T value = *ptr;
  return value;
}
+
+template <typename T>
+void Simulator::WriteMem(sreg_t addr, T value, Instruction* instr) {
+ if (handleWasmSegFault(addr, sizeof(T))) {
+ value = -1;
+ return;
+ }
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%08" REGIx_FORMAT
+ " , pc=0x%08" PRIxPTR " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ T* ptr = reinterpret_cast<T*>(addr);
+ if (!std::is_same<double, T>::value) {
+ TraceMemWr(addr, value);
+ } else {
+ TraceMemWrDouble(addr, value);
+ }
+ *ptr = value;
+}
+
+template <>
+void Simulator::WriteMem(sreg_t addr, Float32 value, Instruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ value = Float32(-1.0f);
+ return;
+ }
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%08" REGIx_FORMAT
+ " , pc=0x%08" PRIxPTR " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ float* ptr = reinterpret_cast<float*>(addr);
+ TraceMemWr(addr, value.get_scalar());
+ memcpy(ptr, &value, 4);
+}
+
+template <>
+void Simulator::WriteMem(sreg_t addr, Float64 value, Instruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ value = Float64(-1.0);
+ return;
+ }
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%08" REGIx_FORMAT
+ " , pc=0x%08" PRIxPTR " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ double* ptr = reinterpret_cast<double*>(addr);
+ TraceMemWrDouble(addr, value.get_scalar());
+ memcpy(ptr, &value, 8);
+}
+
// Lowest address the simulated stack may grow down to; set in init() with a
// 1MB safety margin above the allocation base.
uintptr_t Simulator::stackLimit() const { return stackLimit_; }

uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }

// True if |newsp| (or the current sp when |newsp| is 0) has run past the
// stack limit.
bool Simulator::overRecursed(uintptr_t newsp) const {
  if (newsp == 0) {
    newsp = getRegister(sp);
  }
  return newsp <= stackLimit();
}

// Same check for the hypothetical sp after reserving |extra| more bytes.
bool Simulator::overRecursedWithExtra(uint32_t extra) const {
  uintptr_t newsp = getRegister(sp) - extra;
  return newsp <= stackLimit();
}
+
+// Unsupported instructions use format to print an error and stop execution.
+void Simulator::format(SimInstruction* instr, const char* format) {
+ printf("Simulator found unsupported instruction:\n 0x%016lx: %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
// Note: With the code below we assume that all runtime calls return a 64 bits
// result. If they don't, the v1 result register contains a bogus value, which
// is fine because it is caller-saved.
//
// Function-pointer aliases for every ABI signature the simulator can redirect
// to; the alias name encodes return and argument types ("General" = a 64-bit
// integer/pointer argument) and is matched against Redirection::type() in
// SoftwareInterrupt(). Expressed as alias-declarations rather than typedefs.

// Plain integer-register signatures, by arity.
using Prototype_General0 = int64_t (*)();
using Prototype_General1 = int64_t (*)(int64_t arg0);
using Prototype_General2 = int64_t (*)(int64_t arg0, int64_t arg1);
using Prototype_General3 = int64_t (*)(int64_t arg0, int64_t arg1,
                                       int64_t arg2);
using Prototype_General4 = int64_t (*)(int64_t arg0, int64_t arg1,
                                       int64_t arg2, int64_t arg3);
using Prototype_General5 = int64_t (*)(int64_t arg0, int64_t arg1,
                                       int64_t arg2, int64_t arg3,
                                       int64_t arg4);
using Prototype_General6 = int64_t (*)(int64_t arg0, int64_t arg1,
                                       int64_t arg2, int64_t arg3,
                                       int64_t arg4, int64_t arg5);
using Prototype_General7 = int64_t (*)(int64_t arg0, int64_t arg1,
                                       int64_t arg2, int64_t arg3,
                                       int64_t arg4, int64_t arg5,
                                       int64_t arg6);
using Prototype_General8 = int64_t (*)(int64_t arg0, int64_t arg1,
                                       int64_t arg2, int64_t arg3,
                                       int64_t arg4, int64_t arg5,
                                       int64_t arg6, int64_t arg7);
using Prototype_GeneralGeneralGeneralInt64 = int64_t (*)(int64_t arg0,
                                                         int64_t arg1,
                                                         int64_t arg2,
                                                         int64_t arg3);
using Prototype_GeneralGeneralInt64Int64 = int64_t (*)(int64_t arg0,
                                                       int64_t arg1,
                                                       int64_t arg2,
                                                       int64_t arg3);

// Signatures taking floating-point arguments (passed in FPU registers).
using Prototype_Int_Double = int64_t (*)(double arg0);
using Prototype_Int_IntDouble = int64_t (*)(int64_t arg0, double arg1);
using Prototype_Int_DoubleInt = int64_t (*)(double arg0, int64_t arg1);
using Prototype_Int_DoubleIntInt = int64_t (*)(double arg0, int64_t arg1,
                                               int64_t arg2);
using Prototype_Int_IntDoubleIntInt = int64_t (*)(int64_t arg0, double arg1,
                                                  int64_t arg2, int64_t arg3);

using Prototype_Float32_Float32 = float (*)(float arg0);
using Prototype_Int_Float32 = int64_t (*)(float arg0);
using Prototype_Float32_Float32Float32 = float (*)(float arg0, float arg1);

// Signatures returning a double (in fa0).
using Prototype_Double_None = double (*)();
using Prototype_Double_Double = double (*)(double arg0);
using Prototype_Double_Int = double (*)(int64_t arg0);
using Prototype_Double_DoubleInt = double (*)(double arg0, int64_t arg1);
using Prototype_Double_IntDouble = double (*)(int64_t arg0, double arg1);
using Prototype_Double_DoubleDouble = double (*)(double arg0, double arg1);
using Prototype_Double_DoubleDoubleDouble = double (*)(double arg0,
                                                       double arg1,
                                                       double arg2);
using Prototype_Double_DoubleDoubleDoubleDouble = double (*)(double arg0,
                                                             double arg1,
                                                             double arg2,
                                                             double arg3);

// Mixed 32/64-bit signatures used by VM/wasm builtins.
using Prototype_Int32_General = int32_t (*)(int64_t);
using Prototype_Int32_GeneralInt32 = int32_t (*)(int64_t, int32_t);
using Prototype_Int32_GeneralInt32Int32 = int32_t (*)(int64_t, int32_t,
                                                      int32_t);
using Prototype_Int32_GeneralInt32Int32Int32Int32 = int32_t (*)(int64_t,
                                                                int32_t,
                                                                int32_t,
                                                                int32_t,
                                                                int32_t);
using Prototype_Int32_GeneralInt32Int32Int32Int32Int32 =
    int32_t (*)(int64_t, int32_t, int32_t, int32_t, int32_t, int32_t);
using Prototype_Int32_GeneralInt32Int32Int32Int32General =
    int32_t (*)(int64_t, int32_t, int32_t, int32_t, int32_t, int64_t);
using Prototype_Int32_GeneralInt32Int32Int32General =
    int32_t (*)(int64_t, int32_t, int32_t, int32_t, int64_t);
using Prototype_Int32_GeneralInt32Int32Int64 = int32_t (*)(int64_t, int32_t,
                                                           int32_t, int64_t);
using Prototype_Int32_GeneralInt32Int32General = int32_t (*)(int64_t, int32_t,
                                                             int32_t, int64_t);
using Prototype_Int32_GeneralInt32Int64Int64 = int32_t (*)(int64_t, int32_t,
                                                           int64_t, int64_t);
using Prototype_Int32_GeneralInt32GeneralInt32 = int32_t (*)(int64_t, int32_t,
                                                             int64_t, int32_t);
using Prototype_Int32_GeneralInt32GeneralInt32Int32 =
    int32_t (*)(int64_t, int32_t, int64_t, int32_t, int32_t);
using Prototype_Int32_GeneralGeneral = int32_t (*)(int64_t, int64_t);
using Prototype_Int32_GeneralGeneralGeneral = int32_t (*)(int64_t, int64_t,
                                                          int64_t);
using Prototype_Int32_GeneralGeneralInt32Int32 = int32_t (*)(int64_t, int64_t,
                                                             int32_t, int32_t);
using Prototype_Int32_GeneralInt64Int32Int32Int32 = int32_t (*)(int64_t,
                                                                int64_t,
                                                                int32_t,
                                                                int32_t,
                                                                int32_t);
using Prototype_Int32_GeneralInt64Int32 = int32_t (*)(int64_t, int64_t,
                                                      int32_t);

// FIXED(review): this alias previously declared all four parameters as
// int32_t, contrary to its name and to the convention of every other alias
// here ("General" = int64_t); 64-bit arguments would have been truncated.
using Prototype_Int32_GeneralGeneralInt32General = int32_t (*)(int64_t,
                                                               int64_t,
                                                               int32_t,
                                                               int64_t);
using Prototype_Int32_GeneralInt64Int32Int64 = int32_t (*)(int64_t, int64_t,
                                                           int32_t, int64_t);
using Prototype_Int32_GeneralInt64Int32Int64General =
    int32_t (*)(int64_t, int64_t, int32_t, int64_t, int64_t);
using Prototype_Int32_GeneralInt64Int64Int64 = int32_t (*)(int64_t, int64_t,
                                                           int64_t, int64_t);
using Prototype_Int32_GeneralInt64Int64Int64General =
    int32_t (*)(int64_t, int64_t, int64_t, int64_t, int64_t);
using Prototype_General_GeneralInt32 = int64_t (*)(int64_t, int32_t);
using Prototype_General_GeneralInt32Int32 = int64_t (*)(int64_t, int32_t,
                                                        int32_t);
using Prototype_General_GeneralInt32General = int64_t (*)(int64_t, int32_t,
                                                          int64_t);
using Prototype_General_GeneralInt32Int32GeneralInt32 =
    int64_t (*)(int64_t, int32_t, int32_t, int64_t, int32_t);
using Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32 =
    int32_t (*)(int64_t, int64_t, int32_t, int64_t, int32_t, int32_t, int32_t);
using Prototype_Int64_General = int64_t (*)(int64_t);
using Prototype_Int64_GeneralInt64 = int64_t (*)(int64_t, int64_t);
using Prototype_Int32_GeneralInt64Int64General = int32_t (*)(int64_t, int64_t,
                                                             int64_t, int64_t);
+// Generated by Assembler::break_()/stop(), ebreak code is passed as immediate
+// field of a subsequent LUI instruction; otherwise returns -1
+static inline uint32_t get_ebreak_code(Instruction* instr) {
+ MOZ_ASSERT(instr->InstructionBits() == kBreakInstr);
+ uint8_t* cur = reinterpret_cast<uint8_t*>(instr);
+ Instruction* next_instr = reinterpret_cast<Instruction*>(cur + kInstrSize);
+ if (next_instr->BaseOpcodeFieldRaw() == LUI)
+ return (next_instr->Imm20UValue());
+ else
+ return -1;
+}
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void Simulator::SoftwareInterrupt() {
+ // There are two instructions that could get us here, the ebreak or ecall
+ // instructions are "SYSTEM" class opcode distinuished by Imm12Value field w/
+ // the rest of instruction fields being zero
+ // We first check if we met a call_rt_redirected.
+ if (instr_.InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+ uintptr_t nativeFn =
+ reinterpret_cast<uintptr_t>(redirection->nativeFunction());
+
+ intptr_t arg0 = getRegister(a0);
+ intptr_t arg1 = getRegister(a1);
+ intptr_t arg2 = getRegister(a2);
+ intptr_t arg3 = getRegister(a3);
+ intptr_t arg4 = getRegister(a4);
+ intptr_t arg5 = getRegister(a5);
+ intptr_t arg6 = getRegister(a6);
+ intptr_t arg7 = getRegister(a7);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ intptr_t saved_ra = getRegister(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+ if (FLAG_trace_sim) {
+ printf(
+ "Call to host function at %p with args %ld, %ld, %ld, %ld, %ld, %ld, "
+ "%ld, %ld\n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7);
+ }
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t(&js::wasm::Instance::wake_m32)) {
+ result = int32_t(result);
+ }
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int64_t result = target(dval0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t((int32_t(*)(double))JS::ToInt32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t(&js::wasm::Instance::wait_i32_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ if (external == intptr_t(&js::wasm::Instance::wait_i64_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_DoubleInt: {
+ double dval = getFpuRegisterDouble(fa0);
+ Prototype_Int_DoubleInt target =
+ reinterpret_cast<Prototype_Int_DoubleInt>(external);
+ int64_t result = target(dval, arg0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(fa0);
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int64_t result = target(dval, arg1, arg2);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getFpuRegisterDouble(fa0);
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int64_t result = target(arg0, dval, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(fa0);
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ if (FLAG_trace_sim) printf("ret %f\n", fresult);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(fa0);
+ Prototype_Int_Float32 target =
+ reinterpret_cast<Prototype_Int_Float32>(external);
+ int64_t result = target(fval0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0;
+ float fval1;
+ fval0 = getFpuRegisterFloat(fa0);
+ fval1 = getFpuRegisterFloat(fa1);
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ if (FLAG_trace_sim) printf("ret %f\n", fresult);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, arg0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ double dval1 = getFpuRegisterDouble(fa1);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(arg0, dval0);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int64_t result = target(arg0, dval0);
+ if (FLAG_trace_sim) printf("ret %ld\n", result);
+ setRegister(a0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ double dval1 = getFpuRegisterDouble(fa1);
+ double dval2 = getFpuRegisterDouble(fa2);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(fa0);
+ double dval1 = getFpuRegisterDouble(fa1);
+ double dval2 = getFpuRegisterDouble(fa2);
+ double dval3 = getFpuRegisterDouble(fa3);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ if (FLAG_trace_sim) printf("ret %f\n", dresult);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int32_General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_General>(nativeFn)(arg0);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32>(nativeFn)(
+ arg0, I32(arg1));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4),
+ I32(arg5));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32General: {
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32General>(nativeFn)(
+ arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4), arg5);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), arg4);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ nativeFn)(arg0, I32(arg1), arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3), I32(arg4));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneral>(
+ nativeFn)(arg0, arg1);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(
+ nativeFn)(arg0, arg1, arg2);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3), I32(arg4));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(
+ nativeFn)(arg0, arg1, I32(arg2));
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, arg4);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3, arg4);
+ if (FLAG_trace_sim) printf("ret %d\n", ret);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case Args_General_GeneralInt32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32>(
+ nativeFn)(arg0, I32(arg1));
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32Int32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32General: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32General>(
+ nativeFn)(arg0, I32(arg1), arg2);
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3, I32(arg4));
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32General: {
+ Prototype_Int32_GeneralGeneralInt32General target =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32General>(
+ external);
+ int64_t result = target(I32(arg0), I32(arg1), I32(arg2), I32(arg3));
+ setRegister(a0, I64(result));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ int64_t arg6 = getRegister(a6);
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, I32(arg4), I32(arg5),
+ I32(arg6));
+ setRegister(a0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int64_General: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_General>(nativeFn)(arg0);
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int64_GeneralInt64: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_GeneralInt64>(nativeFn)(
+ arg0, arg1);
+ if (FLAG_trace_sim) printf("ret %ld\n", ret);
+ setRegister(a0, ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(a0, I64(ret));
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+
+ } else if (instr_.InstructionBits() == kBreakInstr &&
+ (get_ebreak_code(instr_.instr()) <= kMaxStopCode)) {
+ uint32_t code = get_ebreak_code(instr_.instr());
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else if (IsTracepoint(code)) {
+ if (!FLAG_debug_sim) {
+ MOZ_CRASH("Add --debug-sim when tracepoint instruction is used.\n");
+ }
+ // printf("%d %d %d %d %d %d %d\n", code, code & LOG_TRACE, code &
+ // LOG_REGS,
+ // code & kDebuggerTracingDirectivesMask, TRACE_ENABLE,
+ // TRACE_DISABLE, kDebuggerTracingDirectivesMask);
+ switch (code & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ if (code & LOG_TRACE) {
+ FLAG_trace_sim = true;
+ }
+ if (code & LOG_REGS) {
+ RiscvDebugger dbg(this);
+ dbg.printAllRegs();
+ }
+ break;
+ case TRACE_DISABLE:
+ if (code & LOG_TRACE) {
+ FLAG_trace_sim = false;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ increaseStopCounter(code);
+ handleStop(code);
+ }
+ } else {
+ // uint8_t code = get_ebreak_code(instr_.instr()) - kMaxStopCode - 1;
+ // switch (LNode::Opcode(code)) {
+ // #define EMIT_OP(OP, ...) \
+// case LNode::Opcode::OP:\
+// std::cout << #OP << std::endl; \
+// break;
+ // LIR_OPCODE_LIST(EMIT_OP);
+ // #undef EMIT_OP
+ // }
+ DieOrDebug();
+ }
+}
+
+// Stop helper functions.
+// Watchpoints occupy the low end of the ebreak code space.
+bool Simulator::isWatchpoint(uint32_t code) {
+  return code <= kMaxWatchpointCode;
+}
+
+// Tracepoints sit between the watchpoint range and the stop range.
+bool Simulator::IsTracepoint(uint32_t code) {
+  return code > kMaxWatchpointCode && code <= kMaxTracepointCode;
+}
+
+// Report that watchpoint `code` was hit: bump the break counter and, when
+// --riscv-print-watchpoint is set, dump all registers before resuming.
+void Simulator::printWatchpoint(uint32_t code) {
+  RiscvDebugger dbg(this);
+  ++break_count_;
+  if (FLAG_riscv_print_watchpoint) {
+    printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64
+           ") ----\n",
+           code, break_count_, icount_);
+    dbg.printAllRegs();  // Print registers and continue running.
+  }
+}
+
+// Handle a stop instruction: enter the debugger when this stop code is
+// enabled, otherwise jump over the stop and its trailing message address.
+void Simulator::handleStop(uint32_t code) {
+  if (!isEnabledStop(code)) {
+    set_pc(get_pc() + 2 * kInstrSize);
+    return;
+  }
+  RiscvDebugger dbg(this);
+  dbg.Debug();
+}
+
+// A stop is an ebreak whose code lies in (kMaxWatchpointCode, kMaxStopCode].
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+  if (instr->InstructionBits() != kBreakInstr) {
+    return false;
+  }
+  const int32_t code = get_ebreak_code(instr->instr());
+  if (code == -1) {
+    return false;
+  }
+  const uint32_t ucode = static_cast<uint32_t>(code);
+  return ucode > kMaxWatchpointCode && ucode <= kMaxStopCode;
+}
+
+// A stop is enabled unless its disabled flag bit is set in the counter word.
+bool Simulator::isEnabledStop(uint32_t code) {
+  // Only genuine stop codes (above the watchpoint range) may be queried.
+  MOZ_ASSERT(code > kMaxWatchpointCode);
+  MOZ_ASSERT(code <= kMaxStopCode);
+  return (watchedStops_[code].count_ & kStopDisabledBit) == 0;
+}
+
+// Re-enable a stop by clearing its disabled flag; already-enabled stops are
+// left untouched.
+void Simulator::enableStop(uint32_t code) {
+  if (isEnabledStop(code)) {
+    return;
+  }
+  watchedStops_[code].count_ &= ~kStopDisabledBit;
+}
+
+// Disable a stop by setting its disabled flag; the hit counter is preserved.
+void Simulator::disableStop(uint32_t code) {
+  if (!isEnabledStop(code)) {
+    return;
+  }
+  watchedStops_[code].count_ |= kStopDisabledBit;
+}
+
+// Count a hit of stop `code`. On counter saturation, reset the counter and
+// re-enable the stop so it keeps firing rather than silently wrapping.
+void Simulator::increaseStopCounter(uint32_t code) {
+  MOZ_ASSERT(code <= kMaxStopCode);
+  // Mask with kStopDisabledBit instead of the literal (1 << 31): shifting 1
+  // into the sign bit of an int is undefined behavior, and the named
+  // constant is what the other watchedStops_ accessors use.
+  if ((watchedStops_[code].count_ & ~kStopDisabledBit) == 0x7fffffff) {
+    printf(
+        "Stop counter for code %i has overflowed.\n"
+        "Enabling this code and resetting the counter to 0.\n",
+        code);
+    watchedStops_[code].count_ = 0;
+    enableStop(code);
+  } else {
+    watchedStops_[code].count_++;
+  }
+}
+
+// Print a stop's status: enabled/disabled state, hit count and optional
+// description. Watchpoint codes and out-of-range codes are rejected with an
+// explanatory message.
+void Simulator::printStopInfo(uint32_t code) {
+  if (code <= kMaxWatchpointCode) {
+    printf("That is a watchpoint, not a stop.\n");
+    return;
+  } else if (code > kMaxStopCode) {
+    printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+    return;
+  }
+  const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+  // Mask off the disabled flag bit to recover the raw hit count.
+  int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+  // Don't print the state of unused breakpoints.
+  if (count != 0) {
+    if (watchedStops_[code].desc_) {
+      printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+             count, watchedStops_[code].desc_);
+    } else {
+      printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+             count);
+    }
+  }
+}
+
+// Report an unrecoverable simulator exception `e` and abort the process.
+void Simulator::SignalException(Exception e) {
+  printf("Error: Exception %i raised.", static_cast<int>(e));
+  MOZ_CRASH();
+}
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+// Either drop into the interactive simulator debugger or abort, depending
+// on the --riscv-trap-to-simulator-debugger flag.
+void Simulator::DieOrDebug() {
+  if (!FLAG_riscv_trap_to_simulator_debugger) {
+    MOZ_CRASH("Die");
+  }
+  RiscvDebugger dbg(this);
+  dbg.Debug();
+}
+
+// Executes the current instruction: dispatches on the instruction format
+// (base R/R4/I/S/B/U/J plus the compressed C formats and, when enabled,
+// vector V), optionally traces the disassembly, advances the PC unless the
+// instruction modified it, and services the debugger's memory watchpoint.
+void Simulator::InstructionDecode(Instruction* instr) {
+  // if (FLAG_check_icache) {
+  //   CheckICache(SimulatorProcess::icache(), instr);
+  // }
+  pc_modified_ = false;
+
+  // Scratch buffer for the disassembled instruction text.
+  EmbeddedVector<char, 256> buffer;
+
+  if (FLAG_trace_sim || FLAG_debug_sim) {
+    SNPrintF(trace_buf_, " ");
+    disasm::NameConverter converter;
+    disasm::Disassembler dasm(converter);
+    // Use a reasonably large buffer.
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+
+    // printf("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+    // reinterpret_cast<intptr_t>(instr), buffer.begin());
+  }
+
+  instr_ = instr;
+  // Each Decode* routine both decodes and executes the instruction.
+  switch (instr_.InstructionType()) {
+    case Instruction::kRType:
+      DecodeRVRType();
+      break;
+    case Instruction::kR4Type:
+      DecodeRVR4Type();
+      break;
+    case Instruction::kIType:
+      DecodeRVIType();
+      break;
+    case Instruction::kSType:
+      DecodeRVSType();
+      break;
+    case Instruction::kBType:
+      DecodeRVBType();
+      break;
+    case Instruction::kUType:
+      DecodeRVUType();
+      break;
+    case Instruction::kJType:
+      DecodeRVJType();
+      break;
+    case Instruction::kCRType:
+      DecodeCRType();
+      break;
+    case Instruction::kCAType:
+      DecodeCAType();
+      break;
+    case Instruction::kCJType:
+      DecodeCJType();
+      break;
+    case Instruction::kCBType:
+      DecodeCBType();
+      break;
+    case Instruction::kCIType:
+      DecodeCIType();
+      break;
+    case Instruction::kCIWType:
+      DecodeCIWType();
+      break;
+    case Instruction::kCSSType:
+      DecodeCSSType();
+      break;
+    case Instruction::kCLType:
+      DecodeCLType();
+      break;
+    case Instruction::kCSType:
+      DecodeCSType();
+      break;
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+    case Instruction::kVType:
+      DecodeVType();
+      break;
+# endif
+    default:
+      UNSUPPORTED();
+  }
+
+  if (FLAG_trace_sim) {
+    printf(" 0x%012" PRIxPTR " %-44s\t%s\n",
+           reinterpret_cast<intptr_t>(instr), buffer.start(),
+           trace_buf_.start());
+  }
+
+  // Branches/jumps set the PC themselves (pc_modified_); otherwise advance
+  // past this instruction (4 bytes, or 2 for a compressed instruction).
+  if (!pc_modified_) {
+    setRegister(pc, reinterpret_cast<sreg_t>(instr) + instr->InstructionSize());
+  }
+
+  // If the debugger registered a watched memory address, print it after
+  // every instruction and break into the debugger when its value changes.
+  if (watch_address_ != nullptr) {
+    printf(" 0x%012" PRIxPTR " : 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
+           " \n",
+           reinterpret_cast<intptr_t>(watch_address_), *watch_address_,
+           *watch_address_);
+    if (watch_value_ != *watch_address_) {
+      RiscvDebugger dbg(this);
+      dbg.Debug();
+      watch_value_ = *watch_address_;
+    }
+  }
+}
+
+// Turn on single-step mode: remember the callback and its argument, then
+// invoke it once immediately with the current PC.
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+  single_step_callback_ = cb;
+  single_step_callback_arg_ = arg;
+  single_stepping_ = true;
+  cb(arg, this, reinterpret_cast<void*>(get_pc()));
+}
+
+// Turn off single-step mode, notifying the callback one final time with the
+// current PC before clearing it. No-op when stepping is already disabled.
+void Simulator::disable_single_stepping() {
+  if (!single_stepping_) {
+    return;
+  }
+  single_step_callback_(single_step_callback_arg_, this,
+                        reinterpret_cast<void*>(get_pc()));
+  single_stepping_ = false;
+  single_step_callback_ = nullptr;
+  single_step_callback_arg_ = nullptr;
+}
+
+// Main simulation loop: fetch/decode/execute until the sentinel end_sim_pc
+// is reached. The template parameter statically enables the --stop-sim-at
+// instruction-count breakpoint check; the registered single-step callback
+// (if any) is invoked before, between, and after instructions.
+template <bool enableStopSimAt>
+void Simulator::execute() {
+  if (single_stepping_) {
+    single_step_callback_(single_step_callback_arg_, this, nullptr);
+  }
+
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  int64_t program_counter = get_pc();
+
+  while (program_counter != end_sim_pc) {
+    if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+      RiscvDebugger dbg(this);
+      dbg.Debug();
+    }
+    if (single_stepping_) {
+      single_step_callback_(single_step_callback_arg_, this,
+                            (void*)program_counter);
+    }
+    Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+    InstructionDecode(instr);
+    icount_++;
+    program_counter = get_pc();
+  }
+
+  if (single_stepping_) {
+    single_step_callback_(single_step_callback_arg_, this, nullptr);
+  }
+}
+
+// RISCV Instruction Decode Routine.
+// Decode and execute an R-type (register-register) instruction: the base
+// integer ALU ops, their 32-bit *W variants (RV64 only) and the M extension
+// multiply/divide ops. Encodings not matched under kRTypeMask fall through
+// to the atomic (AMO) or floating-point (OP_FP) decoders.
+void Simulator::DecodeRVRType() {
+  switch (instr_.InstructionBits() & kRTypeMask) {
+    case RO_ADD: {
+      set_rd(sext_xlen(rs1() + rs2()));
+      break;
+    }
+    case RO_SUB: {
+      set_rd(sext_xlen(rs1() - rs2()));
+      break;
+    }
+    case RO_SLL: {
+      // Shift amount is the low log2(xlen) bits of rs2.
+      set_rd(sext_xlen(rs1() << (rs2() & (xlen - 1))));
+      break;
+    }
+    case RO_SLT: {
+      set_rd(sreg_t(rs1()) < sreg_t(rs2()));
+      break;
+    }
+    case RO_SLTU: {
+      set_rd(reg_t(rs1()) < reg_t(rs2()));
+      break;
+    }
+    case RO_XOR: {
+      set_rd(rs1() ^ rs2());
+      break;
+    }
+    case RO_SRL: {
+      // Logical shift: zero-extend the operand before shifting right.
+      set_rd(sext_xlen(zext_xlen(rs1()) >> (rs2() & (xlen - 1))));
+      break;
+    }
+    case RO_SRA: {
+      // Arithmetic shift: sign-extend the operand before shifting right.
+      set_rd(sext_xlen(sext_xlen(rs1()) >> (rs2() & (xlen - 1))));
+      break;
+    }
+    case RO_OR: {
+      set_rd(rs1() | rs2());
+      break;
+    }
+    case RO_AND: {
+      set_rd(rs1() & rs2());
+      break;
+    }
+# ifdef JS_CODEGEN_RISCV64
+    case RO_ADDW: {
+      set_rd(sext32(rs1() + rs2()));
+      break;
+    }
+    case RO_SUBW: {
+      set_rd(sext32(rs1() - rs2()));
+      break;
+    }
+    case RO_SLLW: {
+      set_rd(sext32(rs1() << (rs2() & 0x1F)));
+      break;
+    }
+    case RO_SRLW: {
+      set_rd(sext32(uint32_t(rs1()) >> (rs2() & 0x1F)));
+      break;
+    }
+    case RO_SRAW: {
+      set_rd(sext32(int32_t(rs1()) >> (rs2() & 0x1F)));
+      break;
+    }
+# endif /* JS_CODEGEN_RISCV64 */
+    // TODO(riscv): Add RISCV M extension macro
+    case RO_MUL: {
+      set_rd(rs1() * rs2());
+      break;
+    }
+    case RO_MULH: {
+      set_rd(mulh(rs1(), rs2()));
+      break;
+    }
+    case RO_MULHSU: {
+      set_rd(mulhsu(rs1(), rs2()));
+      break;
+    }
+    case RO_MULHU: {
+      set_rd(mulhu(rs1(), rs2()));
+      break;
+    }
+    // Per the RISC-V spec, division never traps: x/0 yields -1 (or all ones
+    // unsigned), MIN/-1 yields MIN, and remainders follow suit.
+    case RO_DIV: {
+      sreg_t lhs = sext_xlen(rs1());
+      sreg_t rhs = sext_xlen(rs2());
+      if (rhs == 0) {
+        set_rd(-1);
+      } else if (lhs == INTPTR_MIN && rhs == -1) {
+        set_rd(lhs);
+      } else {
+        set_rd(sext_xlen(lhs / rhs));
+      }
+      break;
+    }
+    case RO_DIVU: {
+      reg_t lhs = zext_xlen(rs1());
+      reg_t rhs = zext_xlen(rs2());
+      if (rhs == 0) {
+        set_rd(UINTPTR_MAX);
+      } else {
+        set_rd(zext_xlen(lhs / rhs));
+      }
+      break;
+    }
+    case RO_REM: {
+      sreg_t lhs = sext_xlen(rs1());
+      sreg_t rhs = sext_xlen(rs2());
+      if (rhs == 0) {
+        set_rd(lhs);
+      } else if (lhs == INTPTR_MIN && rhs == -1) {
+        set_rd(0);
+      } else {
+        set_rd(sext_xlen(lhs % rhs));
+      }
+      break;
+    }
+    case RO_REMU: {
+      reg_t lhs = zext_xlen(rs1());
+      reg_t rhs = zext_xlen(rs2());
+      if (rhs == 0) {
+        set_rd(lhs);
+      } else {
+        set_rd(zext_xlen(lhs % rhs));
+      }
+      break;
+    }
+# ifdef JS_CODEGEN_RISCV64
+    case RO_MULW: {
+      set_rd(sext32(sext32(rs1()) * sext32(rs2())));
+      break;
+    }
+    case RO_DIVW: {
+      sreg_t lhs = sext32(rs1());
+      sreg_t rhs = sext32(rs2());
+      if (rhs == 0) {
+        set_rd(-1);
+      } else if (lhs == INT32_MIN && rhs == -1) {
+        set_rd(lhs);
+      } else {
+        set_rd(sext32(lhs / rhs));
+      }
+      break;
+    }
+    case RO_DIVUW: {
+      reg_t lhs = zext32(rs1());
+      reg_t rhs = zext32(rs2());
+      if (rhs == 0) {
+        set_rd(UINT32_MAX);
+      } else {
+        set_rd(zext32(lhs / rhs));
+      }
+      break;
+    }
+    case RO_REMW: {
+      sreg_t lhs = sext32(rs1());
+      sreg_t rhs = sext32(rs2());
+      if (rhs == 0) {
+        set_rd(lhs);
+      } else if (lhs == INT32_MIN && rhs == -1) {
+        set_rd(0);
+      } else {
+        set_rd(sext32(lhs % rhs));
+      }
+      break;
+    }
+    case RO_REMUW: {
+      reg_t lhs = zext32(rs1());
+      reg_t rhs = zext32(rs2());
+      if (rhs == 0) {
+        set_rd(zext32(lhs));
+      } else {
+        set_rd(zext32(lhs % rhs));
+      }
+      break;
+    }
+# endif /*JS_CODEGEN_RISCV64*/
+    // TODO(riscv): End Add RISCV M extension macro
+    default: {
+      // Not an integer R-type op under kRTypeMask; try the atomic and
+      // floating-point R-type decoders before giving up.
+      switch (instr_.BaseOpcode()) {
+        case AMO:
+          DecodeRVRAType();
+          break;
+        case OP_FP:
+          DecodeRVRFPType();
+          break;
+        default:
+          UNSUPPORTED();
+      }
+    }
+  }
+}
+
+// Compute fmin/fmax with RISC-V F/D semantics: a signaling-NaN input raises
+// the invalid-operation flag, a single NaN operand yields the other operand,
+// two NaN operands yield the canonical quiet NaN, and +0.0 is treated as
+// greater than -0.0.
+template <typename T>
+T Simulator::FMaxMinHelper(T a, T b, MaxMinKind kind) {
+  // Set the invalid bit for signaling-NaN inputs. Note: the previous check
+  // compared against std::numeric_limits<T>::signaling_NaN() with ==, which
+  // is always false (NaN never compares equal to anything), so the flag was
+  // never raised; use the bit-pattern based isSnan() instead.
+  if (isSnan(a) || isSnan(b)) {
+    set_csr_bits(csr_fflags, kInvalidOperation);
+  }
+
+  T result = 0;
+  if (std::isnan(a) && std::isnan(b)) {
+    // Use T's quiet NaN (the old code hard-coded float's quiet NaN inside
+    // this template over T).
+    result = std::numeric_limits<T>::quiet_NaN();
+  } else if (std::isnan(a)) {
+    result = b;
+  } else if (std::isnan(b)) {
+    result = a;
+  } else if (b == a) {  // Handle -0.0 == 0.0 case.
+    if (kind == MaxMinKind::kMax) {
+      result = std::signbit(b) ? a : b;
+    } else {
+      result = std::signbit(b) ? b : a;
+    }
+  } else {
+    result = (kind == MaxMinKind::kMax) ? fmax(a, b) : fmin(a, b);
+  }
+
+  return result;
+}
+
+// Round `input_val` to an integral float value according to rounding mode
+// `rmode` (one of RNE/RTZ/RDN/RUP/RMM; DYN resolves to the mode currently
+// held in the frm CSR).
+float Simulator::RoundF2FHelper(float input_val, int rmode) {
+  if (rmode == DYN) rmode = get_dynamic_rounding_mode();
+
+  float rounded = 0;
+  switch (rmode) {
+    case RNE: {  // Round to Nearest, ties to Even
+      // Start from floor and decide whether to bump up by one.
+      rounded = floorf(input_val);
+      float error = input_val - rounded;
+
+      // Take care of correctly handling the range [-0.5, -0.0], which must
+      // yield -0.0.
+      if ((-0.5 <= input_val) && (input_val < 0.0)) {
+        rounded = -0.0;
+
+        // If the error is greater than 0.5, or is equal to 0.5 and the integer
+        // result is odd, round up.
+      } else if ((error > 0.5) ||
+                 ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
+        rounded++;
+      }
+      break;
+    }
+    case RTZ:  // Round towards Zero
+      rounded = std::truncf(input_val);
+      break;
+    case RDN:  // Round Down (towards -infinity)
+      rounded = floorf(input_val);
+      break;
+    case RUP:  // Round Up (towards +infinity)
+      rounded = ceilf(input_val);
+      break;
+    case RMM:  // Round to Nearest, ties to Max Magnitude
+      rounded = std::roundf(input_val);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  return rounded;
+}
+
+// Double-precision counterpart of RoundF2FHelper(float): round `input_val`
+// to an integral double according to rounding mode `rmode` (DYN resolves to
+// the mode currently held in the frm CSR).
+double Simulator::RoundF2FHelper(double input_val, int rmode) {
+  if (rmode == DYN) rmode = get_dynamic_rounding_mode();
+
+  double rounded = 0;
+  switch (rmode) {
+    case RNE: {  // Round to Nearest, ties to Even
+      // Start from floor and decide whether to bump up by one.
+      rounded = std::floor(input_val);
+      double error = input_val - rounded;
+
+      // Take care of correctly handling the range [-0.5, -0.0], which must
+      // yield -0.0.
+      if ((-0.5 <= input_val) && (input_val < 0.0)) {
+        rounded = -0.0;
+
+        // If the error is greater than 0.5, or is equal to 0.5 and the integer
+        // result is odd, round up.
+      } else if ((error > 0.5) ||
+                 ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
+        rounded++;
+      }
+      break;
+    }
+    case RTZ:  // Round towards Zero
+      rounded = std::trunc(input_val);
+      break;
+    case RDN:  // Round Down (towards -infinity)
+      rounded = std::floor(input_val);
+      break;
+    case RUP:  // Round Up (towards +infinity)
+      rounded = std::ceil(input_val);
+      break;
+    case RMM:  // Round to Nearest, ties to Max Magnitude
+      rounded = std::round(input_val);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return rounded;
+}
+
+// Convert a rounded floating-point value to an integer type, handling input
+// values that are out-of-range, underflowing, or NaN, and setting the
+// appropriate fflags: NaN/+inf saturate to the integer max, -inf to the
+// integer min, both raising invalid-operation.
+template <typename I_TYPE, typename F_TYPE>
+I_TYPE Simulator::RoundF2IHelper(F_TYPE original, int rmode) {
+  MOZ_ASSERT(std::is_integral<I_TYPE>::value);
+
+  MOZ_ASSERT((std::is_same<F_TYPE, float>::value ||
+              std::is_same<F_TYPE, double>::value));
+
+  I_TYPE max_i = std::numeric_limits<I_TYPE>::max();
+  I_TYPE min_i = std::numeric_limits<I_TYPE>::min();
+
+  // Non-finite inputs saturate and raise invalid-operation.
+  if (!std::isfinite(original)) {
+    set_fflags(kInvalidOperation);
+    if (std::isnan(original) ||
+        original == std::numeric_limits<F_TYPE>::infinity()) {
+      return max_i;
+    } else {
+      MOZ_ASSERT(original == -std::numeric_limits<F_TYPE>::infinity());
+      return min_i;
+    }
+  }
+
+  F_TYPE rounded = RoundF2FHelper(original, rmode);
+  // Any rounding at all means the conversion is inexact.
+  if (original != rounded) set_fflags(kInexact);
+
+  if (!std::isfinite(rounded)) {
+    set_fflags(kInvalidOperation);
+    if (std::isnan(rounded) ||
+        rounded == std::numeric_limits<F_TYPE>::infinity()) {
+      return max_i;
+    } else {
+      MOZ_ASSERT(rounded == -std::numeric_limits<F_TYPE>::infinity());
+      return min_i;
+    }
+  }
+
+  // Since integer max values are either all 1s (for unsigned) or all 1s
+  // except for sign-bit (for signed), they cannot be represented precisely in
+  // floating point, in order to precisely tell whether the rounded floating
+  // point is within the max range, we compare against (max_i+1) which would
+  // have a single 1 w/ many trailing zeros. (max_i+1 is a power of two, so it
+  // is exactly representable even as a float.)
+  float max_i_plus_1 =
+      std::is_same<uint64_t, I_TYPE>::value
+          ? 0x1p64f  // uint64_t::max + 1 cannot be represented in integers,
+                     // so use its float representation directly
+          : static_cast<float>(static_cast<uint64_t>(max_i) + 1);
+  if (rounded >= max_i_plus_1) {
+    set_fflags(kOverflow | kInvalidOperation);
+    return max_i;
+  }
+
+  // Since min_i (either 0 for unsigned, or for signed) is represented
+  // precisely in floating-point, comparing rounded directly against min_i
+  if (rounded <= min_i) {
+    if (rounded < min_i) set_fflags(kOverflow | kInvalidOperation);
+    return min_i;
+  }
+
+  // Non-zero results smaller in magnitude than the smallest normal number
+  // flag underflow.
+  F_TYPE underflow_fval =
+      std::is_same<F_TYPE, float>::value ? FLT_MIN : DBL_MIN;
+  if (rounded < underflow_fval && rounded > -underflow_fval && rounded != 0) {
+    set_fflags(kUnderflow);
+  }
+
+  return static_cast<I_TYPE>(rounded);
+}
+
+template <typename T>
+static int64_t FclassHelper(T value) {
+ switch (std::fpclassify(value)) {
+ case FP_INFINITE:
+ return (std::signbit(value) ? kNegativeInfinity : kPositiveInfinity);
+ case FP_NAN:
+ return (isSnan(value) ? kSignalingNaN : kQuietNaN);
+ case FP_NORMAL:
+ return (std::signbit(value) ? kNegativeNormalNumber
+ : kPositiveNormalNumber);
+ case FP_SUBNORMAL:
+ return (std::signbit(value) ? kNegativeSubnormalNumber
+ : kPositiveSubnormalNumber);
+ case FP_ZERO:
+ return (std::signbit(value) ? kNegativeZero : kPositiveZero);
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return FP_ZERO;
+}
+
+template <typename T>
+bool Simulator::CompareFHelper(T input1, T input2, FPUCondition cc) {
+ MOZ_ASSERT(std::is_floating_point<T>::value);
+ bool result = false;
+ switch (cc) {
+ case LT:
+ case LE:
+ // FLT, FLE are signaling compares
+ if (std::isnan(input1) || std::isnan(input2)) {
+ set_fflags(kInvalidOperation);
+ result = false;
+ } else {
+ result = (cc == LT) ? (input1 < input2) : (input1 <= input2);
+ }
+ break;
+ case EQ:
+ if (std::numeric_limits<T>::signaling_NaN() == input1 ||
+ std::numeric_limits<T>::signaling_NaN() == input2) {
+ set_fflags(kInvalidOperation);
+ }
+ if (std::isnan(input1) || std::isnan(input2)) {
+ result = false;
+ } else {
+ result = (input1 == input2);
+ }
+ break;
+ case NE:
+ if (std::numeric_limits<T>::signaling_NaN() == input1 ||
+ std::numeric_limits<T>::signaling_NaN() == input2) {
+ set_fflags(kInvalidOperation);
+ }
+ if (std::isnan(input1) || std::isnan(input2)) {
+ result = true;
+ } else {
+ result = (input1 != input2);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return result;
+}
+
// A multiplication is an invalid operation (raises NV, yields qNaN)
// exactly when it is 0 * infinity in either operand order.
template <typename T>
static inline bool is_invalid_fmul(T src1, T src2) {
  const T zero = static_cast<T>(0.0);
  if (std::isinf(src1) && src2 == zero) {
    return true;
  }
  return src1 == zero && std::isinf(src2);
}
+
// An addition is an invalid operation exactly when it adds infinities of
// opposite sign: (+inf) + (-inf) or (-inf) + (+inf).
template <typename T>
static inline bool is_invalid_fadd(T src1, T src2) {
  if (!std::isinf(src1) || !std::isinf(src2)) {
    return false;
  }
  return std::signbit(src1) != std::signbit(src2);
}
+
// A subtraction is an invalid operation exactly when it subtracts
// infinities of the same sign: (+inf) - (+inf) or (-inf) - (-inf).
template <typename T>
static inline bool is_invalid_fsub(T src1, T src2) {
  if (!std::isinf(src1) || !std::isinf(src2)) {
    return false;
  }
  return std::signbit(src1) == std::signbit(src2);
}
+
// A division is an invalid operation exactly when it is 0/0 or inf/inf
// (signs do not matter). Finite/0 is divide-by-zero, handled separately.
template <typename T>
static inline bool is_invalid_fdiv(T src1, T src2) {
  const bool zero_over_zero = (src1 == 0 && src2 == 0);
  const bool inf_over_inf = std::isinf(src1) && std::isinf(src2);
  return zero_over_zero || inf_over_inf;
}
+
// A square root is an invalid operation exactly for inputs strictly below
// zero. Note -0.0 is NOT invalid (sqrt(-0.0) == -0.0 per IEEE-754), which
// is why this uses `< 0` rather than signbit().
template <typename T>
static inline bool is_invalid_fsqrt(T src1) {
  return src1 < static_cast<T>(0);
}
+
+int Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr) {
+ if ((addr & 3) == 0) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalW(uint64_t addr, int value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by architecture, is to just
+ // return 0, but there is no point at allowing that. It is certainly an
+ // indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIx64
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & 3) == 0) {
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
+ if (!LLBit_) {
+ return 1;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int32_t expected = int32_t(lastLLValue_);
+ int32_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+ return (old == expected) ? 0 : 1;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int64_t Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
+ int64_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr) {
+ // Correct behavior in this case, as defined by architecture, is to just
+ // return 0, but there is no point at allowing that. It is certainly an
+ // indicator of a bug.
+ if (addr != LLAddr_) {
+ printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIx64
+ ", expected: 0x%016" PRIx64 "\n",
+ addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0) {
+ SharedMem<int64_t*> ptr =
+ SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
+
+ if (!LLBit_) {
+ return 1;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int64_t expected = lastLLValue_;
+ int64_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
+ return (old == expected) ? 0 : 1;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
void Simulator::DecodeRVRAType() {
  // Decode and execute RISC-V "A" (atomic) extension instructions:
  // LR/SC and the AMO read-modify-write family, for word (and on RV64,
  // doubleword) widths. Each AMO applies its lambda to the old memory
  // value via amo<>() and writes the old value to rd (sign-extended for
  // the W forms).
  // TODO(riscv): Add macro for RISCV A extension
  // Special handling for A extension instructions because it uses func5.
  // For all A extension instructions the simulator is purely sequential:
  // no memory address lock or other synchronization behaviors.
  switch (instr_.InstructionBits() & kRATypeMask) {
    case RO_LR_W: {
      sreg_t addr = rs1();
      set_rd(loadLinkedW(addr, &instr_));
      TraceLr(addr, getRegister(rd_reg()), getRegister(rd_reg()));
      break;
    }
    case RO_SC_W: {
      sreg_t addr = rs1();
      auto value = static_cast<int32_t>(rs2());
      auto result =
          storeConditionalW(addr, static_cast<int32_t>(rs2()), &instr_);
      set_rd(result);
      // result == 0 means the store-conditional succeeded.
      if (!result) {
        TraceSc(addr, value);
      }
      break;
    }
    case RO_AMOSWAP_W: {
      // Word AMOs require 4-byte alignment; misaligned traps to DieOrDebug.
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<uint32_t>(
          rs1(), [&](uint32_t lhs) { return (uint32_t)rs2(); }, instr_.instr(),
          WORD)));
      break;
    }
    case RO_AMOADD_W: {
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<uint32_t>(
          rs1(), [&](uint32_t lhs) { return lhs + (uint32_t)rs2(); },
          instr_.instr(), WORD)));
      break;
    }
    case RO_AMOXOR_W: {
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<uint32_t>(
          rs1(), [&](uint32_t lhs) { return lhs ^ (uint32_t)rs2(); },
          instr_.instr(), WORD)));
      break;
    }
    case RO_AMOAND_W: {
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<uint32_t>(
          rs1(), [&](uint32_t lhs) { return lhs & (uint32_t)rs2(); },
          instr_.instr(), WORD)));
      break;
    }
    case RO_AMOOR_W: {
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<uint32_t>(
          rs1(), [&](uint32_t lhs) { return lhs | (uint32_t)rs2(); },
          instr_.instr(), WORD)));
      break;
    }
    case RO_AMOMIN_W: {
      // MIN/MAX are signed; MINU/MAXU below are unsigned.
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<int32_t>(
          rs1(), [&](int32_t lhs) { return std::min(lhs, (int32_t)rs2()); },
          instr_.instr(), WORD)));
      break;
    }
    case RO_AMOMAX_W: {
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<int32_t>(
          rs1(), [&](int32_t lhs) { return std::max(lhs, (int32_t)rs2()); },
          instr_.instr(), WORD)));
      break;
    }
    case RO_AMOMINU_W: {
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<uint32_t>(
          rs1(), [&](uint32_t lhs) { return std::min(lhs, (uint32_t)rs2()); },
          instr_.instr(), WORD)));
      break;
    }
    case RO_AMOMAXU_W: {
      if ((rs1() & 0x3) != 0) {
        DieOrDebug();
      }
      set_rd(sext32(amo<uint32_t>(
          rs1(), [&](uint32_t lhs) { return std::max(lhs, (uint32_t)rs2()); },
          instr_.instr(), WORD)));
      break;
    }
# ifdef JS_CODEGEN_RISCV64
    case RO_LR_D: {
      sreg_t addr = rs1();
      set_rd(loadLinkedD(addr, &instr_));
      TraceLr(addr, getRegister(rd_reg()), getRegister(rd_reg()));
      break;
    }
    case RO_SC_D: {
      sreg_t addr = rs1();
      auto value = static_cast<int64_t>(rs2());
      auto result =
          storeConditionalD(addr, static_cast<int64_t>(rs2()), &instr_);
      set_rd(result);
      if (!result) {
        TraceSc(addr, value);
      }
      break;
    }
    case RO_AMOSWAP_D: {
      set_rd(amo<int64_t>(
          rs1(), [&](int64_t lhs) { return rs2(); }, instr_.instr(), DWORD));
      break;
    }
    case RO_AMOADD_D: {
      set_rd(amo<int64_t>(
          rs1(), [&](int64_t lhs) { return lhs + rs2(); }, instr_.instr(),
          DWORD));
      break;
    }
    case RO_AMOXOR_D: {
      set_rd(amo<int64_t>(
          rs1(), [&](int64_t lhs) { return lhs ^ rs2(); }, instr_.instr(),
          DWORD));
      break;
    }
    case RO_AMOAND_D: {
      set_rd(amo<int64_t>(
          rs1(), [&](int64_t lhs) { return lhs & rs2(); }, instr_.instr(),
          DWORD));
      break;
    }
    case RO_AMOOR_D: {
      set_rd(amo<int64_t>(
          rs1(), [&](int64_t lhs) { return lhs | rs2(); }, instr_.instr(),
          DWORD));
      break;
    }
    case RO_AMOMIN_D: {
      set_rd(amo<int64_t>(
          rs1(), [&](int64_t lhs) { return std::min(lhs, rs2()); },
          instr_.instr(), DWORD));
      break;
    }
    case RO_AMOMAX_D: {
      set_rd(amo<int64_t>(
          rs1(), [&](int64_t lhs) { return std::max(lhs, rs2()); },
          instr_.instr(), DWORD));
      break;
    }
    case RO_AMOMINU_D: {
      set_rd(amo<uint64_t>(
          rs1(), [&](uint64_t lhs) { return std::min(lhs, (uint64_t)rs2()); },
          instr_.instr(), DWORD));
      break;
    }
    case RO_AMOMAXU_D: {
      set_rd(amo<uint64_t>(
          rs1(), [&](uint64_t lhs) { return std::max(lhs, (uint64_t)rs2()); },
          instr_.instr(), DWORD));
      break;
    }
# endif /*JS_CODEGEN_RISCV64*/
    // TODO(riscv): End Add macro for RISCV A extension
    default: {
      UNSUPPORTED();
    }
  }
}
+
void Simulator::DecodeRVRFPType() {
  // Decode and execute single- and double-precision OP-FP instructions
  // (F and D extensions). Dispatch is on funct7 via kRFPTypeMask; several
  // entries further discriminate on funct3 and/or rs2. Arithmetic results
  // go through the CanonicalizeFPUOp* helpers, which NaN-canonicalize the
  // output and merge host FP exceptions into fflags.
  // OP_FP instructions (F/D) use func7 first. Some further use func3 and
  // rs2().

  // kRFPTypeMask selects only func7 bits.
  switch (instr_.InstructionBits() & kRFPTypeMask) {
    // TODO(riscv): Add macro for RISCV F extension
    case RO_FADD_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2) {
        if (is_invalid_fadd(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else {
          return frs1 + frs2;
        }
      };
      set_frd(CanonicalizeFPUOp2<float>(fn));
      break;
    }
    case RO_FSUB_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2) {
        if (is_invalid_fsub(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else {
          return frs1 - frs2;
        }
      };
      set_frd(CanonicalizeFPUOp2<float>(fn));
      break;
    }
    case RO_FMUL_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2) {
        if (is_invalid_fmul(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else {
          return frs1 * frs2;
        }
      };
      set_frd(CanonicalizeFPUOp2<float>(fn));
      break;
    }
    case RO_FDIV_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2) {
        if (is_invalid_fdiv(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else if (frs2 == 0.0f) {
          // finite/0: divide-by-zero flag, result is signed infinity.
          this->set_fflags(kDivideByZero);
          return (std::signbit(frs1) == std::signbit(frs2)
                      ? std::numeric_limits<float>::infinity()
                      : -std::numeric_limits<float>::infinity());
        } else {
          return frs1 / frs2;
        }
      };
      set_frd(CanonicalizeFPUOp2<float>(fn));
      break;
    }
    case RO_FSQRT_S: {
      if (instr_.Rs2Value() == 0b00000) {
        // TODO(riscv): use rm value (round mode)
        auto fn = [this](float frs) {
          if (is_invalid_fsqrt(frs)) {
            this->set_fflags(kInvalidOperation);
            return std::numeric_limits<float>::quiet_NaN();
          } else {
            return std::sqrt(frs);
          }
        };
        set_frd(CanonicalizeFPUOp1<float>(fn));
      } else {
        UNSUPPORTED();
      }
      break;
    }
    case RO_FSGNJ_S: {  // RO_FSGNJN_S RO_FSQNJX_S
      // Sign-injection works on the raw (possibly NaN-boxed) bit patterns.
      switch (instr_.Funct3Value()) {
        case 0b000: {  // RO_FSGNJ_S
          set_frd(fsgnj32(frs1_boxed(), frs2_boxed(), false, false));
          break;
        }
        case 0b001: {  // RO_FSGNJN_S
          set_frd(fsgnj32(frs1_boxed(), frs2_boxed(), true, false));
          break;
        }
        case 0b010: {  // RO_FSQNJX_S
          set_frd(fsgnj32(frs1_boxed(), frs2_boxed(), false, true));
          break;
        }
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FMIN_S: {  // RO_FMAX_S
      switch (instr_.Funct3Value()) {
        case 0b000: {  // RO_FMIN_S
          set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMin));
          break;
        }
        case 0b001: {  // RO_FMAX_S
          set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMax));
          break;
        }
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FCVT_W_S: {  // RO_FCVT_WU_S , 64F RO_FCVT_L_S RO_FCVT_LU_S
      // Float -> integer conversions; rs2 encodes the target width/sign.
      float original_val = frs1();
      switch (instr_.Rs2Value()) {
        case 0b00000: {  // RO_FCVT_W_S
          set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
          break;
        }
        case 0b00001: {  // RO_FCVT_WU_S
          // Per RV64 convention, 32-bit results are sign-extended even for
          // the unsigned conversion.
          set_rd(sext32(
              RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
          break;
        }
# ifdef JS_CODEGEN_RISCV64
        case 0b00010: {  // RO_FCVT_L_S
          set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
          break;
        }
        case 0b00011: {  // RO_FCVT_LU_S
          set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
          break;
        }
# endif /* JS_CODEGEN_RISCV64 */
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FMV: {  // RO_FCLASS_S
      switch (instr_.Funct3Value()) {
        case 0b000: {
          if (instr_.Rs2Value() == 0b00000) {
            // RO_FMV_X_W: move raw FP bits to the integer register,
            // sign-extended.
            set_rd(sext32(getFpuRegister(rs1_reg())));
          } else {
            UNSUPPORTED();
          }
          break;
        }
        case 0b001: {  // RO_FCLASS_S
          set_rd(FclassHelper(frs1()));
          break;
        }
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FLE_S: {  // RO_FEQ_S RO_FLT_S RO_FLE_S
      switch (instr_.Funct3Value()) {
        case 0b010: {  // RO_FEQ_S
          set_rd(CompareFHelper(frs1(), frs2(), EQ));
          break;
        }
        case 0b001: {  // RO_FLT_S
          set_rd(CompareFHelper(frs1(), frs2(), LT));
          break;
        }
        case 0b000: {  // RO_FLE_S
          set_rd(CompareFHelper(frs1(), frs2(), LE));
          break;
        }
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FCVT_S_W: {  // RO_FCVT_S_WU , 64F RO_FCVT_S_L RO_FCVT_S_LU
      // Integer -> float conversions; rs2 encodes the source width/sign.
      switch (instr_.Rs2Value()) {
        case 0b00000: {  // RO_FCVT_S_W
          set_frd(static_cast<float>((int32_t)rs1()));
          break;
        }
        case 0b00001: {  // RO_FCVT_S_WU
          set_frd(static_cast<float>((uint32_t)rs1()));
          break;
        }
# ifdef JS_CODEGEN_RISCV64
        case 0b00010: {  // RO_FCVT_S_L
          set_frd(static_cast<float>((int64_t)rs1()));
          break;
        }
        case 0b00011: {  // RO_FCVT_S_LU
          set_frd(static_cast<float>((uint64_t)rs1()));
          break;
        }
# endif /* JS_CODEGEN_RISCV64 */
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FMV_W_X: {
      if (instr_.Funct3Value() == 0b000) {
        // Since FMV preserves the source bit pattern, no need to canonicalize.
        Float32 result = Float32::FromBits((uint32_t)rs1());
        set_frd(result);
      } else {
        UNSUPPORTED();
      }
      break;
    }
    // TODO(riscv): Add macro for RISCV D extension
    case RO_FADD_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2) {
        if (is_invalid_fadd(drs1, drs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else {
          return drs1 + drs2;
        }
      };
      set_drd(CanonicalizeFPUOp2<double>(fn));
      break;
    }
    case RO_FSUB_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2) {
        if (is_invalid_fsub(drs1, drs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else {
          return drs1 - drs2;
        }
      };
      set_drd(CanonicalizeFPUOp2<double>(fn));
      break;
    }
    case RO_FMUL_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2) {
        if (is_invalid_fmul(drs1, drs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else {
          return drs1 * drs2;
        }
      };
      set_drd(CanonicalizeFPUOp2<double>(fn));
      break;
    }
    case RO_FDIV_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2) {
        if (is_invalid_fdiv(drs1, drs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else if (drs2 == 0.0) {
          // finite/0: divide-by-zero flag, result is signed infinity.
          this->set_fflags(kDivideByZero);
          return (std::signbit(drs1) == std::signbit(drs2)
                      ? std::numeric_limits<double>::infinity()
                      : -std::numeric_limits<double>::infinity());
        } else {
          return drs1 / drs2;
        }
      };
      set_drd(CanonicalizeFPUOp2<double>(fn));
      break;
    }
    case RO_FSQRT_D: {
      if (instr_.Rs2Value() == 0b00000) {
        // TODO(riscv): use rm value (round mode)
        auto fn = [this](double drs) {
          if (is_invalid_fsqrt(drs)) {
            this->set_fflags(kInvalidOperation);
            return std::numeric_limits<double>::quiet_NaN();
          } else {
            return std::sqrt(drs);
          }
        };
        set_drd(CanonicalizeFPUOp1<double>(fn));
      } else {
        UNSUPPORTED();
      }
      break;
    }
    case RO_FSGNJ_D: {  // RO_FSGNJN_D RO_FSQNJX_D
      switch (instr_.Funct3Value()) {
        case 0b000: {  // RO_FSGNJ_D
          set_drd(fsgnj64(drs1_boxed(), drs2_boxed(), false, false));
          break;
        }
        case 0b001: {  // RO_FSGNJN_D
          set_drd(fsgnj64(drs1_boxed(), drs2_boxed(), true, false));
          break;
        }
        case 0b010: {  // RO_FSQNJX_D
          set_drd(fsgnj64(drs1_boxed(), drs2_boxed(), false, true));
          break;
        }
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FMIN_D: {  // RO_FMAX_D
      switch (instr_.Funct3Value()) {
        case 0b000: {  // RO_FMIN_D
          set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMin));
          break;
        }
        case 0b001: {  // RO_FMAX_D
          set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMax));
          break;
        }
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case (RO_FCVT_S_D & kRFPTypeMask): {
      // Narrowing double -> float conversion.
      if (instr_.Rs2Value() == 0b00001) {
        auto fn = [](double drs) { return static_cast<float>(drs); };
        set_frd(CanonicalizeDoubleToFloatOperation(fn));
      } else {
        UNSUPPORTED();
      }
      break;
    }
    case RO_FCVT_D_S: {
      // Widening float -> double conversion.
      if (instr_.Rs2Value() == 0b00000) {
        auto fn = [](float frs) { return static_cast<double>(frs); };
        set_drd(CanonicalizeFloatToDoubleOperation(fn));
      } else {
        UNSUPPORTED();
      }
      break;
    }
    case RO_FLE_D: {  // RO_FEQ_D RO_FLT_D RO_FLE_D
      switch (instr_.Funct3Value()) {
        case 0b010: {  // RO_FEQ_D
          set_rd(CompareFHelper(drs1(), drs2(), EQ));
          break;
        }
        case 0b001: {  // RO_FLT_D
          set_rd(CompareFHelper(drs1(), drs2(), LT));
          break;
        }
        case 0b000: {  // RO_FLE_D
          set_rd(CompareFHelper(drs1(), drs2(), LE));
          break;
        }
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case (RO_FCLASS_D & kRFPTypeMask): {  // RO_FCLASS_D , 64D RO_FMV_X_D
      if (instr_.Rs2Value() != 0b00000) {
        UNSUPPORTED();
      }
      switch (instr_.Funct3Value()) {
        case 0b001: {  // RO_FCLASS_D
          set_rd(FclassHelper(drs1()));
          break;
        }
# ifdef JS_CODEGEN_RISCV64
        case 0b000: {  // RO_FMV_X_D
          set_rd(bit_cast<int64_t>(drs1()));
          break;
        }
# endif /* JS_CODEGEN_RISCV64 */
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FCVT_W_D: {  // RO_FCVT_WU_D , 64F RO_FCVT_L_D RO_FCVT_LU_D
      // Double -> integer conversions; rs2 encodes the target width/sign.
      double original_val = drs1();
      switch (instr_.Rs2Value()) {
        case 0b00000: {  // RO_FCVT_W_D
          set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
          break;
        }
        case 0b00001: {  // RO_FCVT_WU_D
          set_rd(sext32(
              RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
          break;
        }
# ifdef JS_CODEGEN_RISCV64
        case 0b00010: {  // RO_FCVT_L_D
          set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
          break;
        }
        case 0b00011: {  // RO_FCVT_LU_D
          set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
          break;
        }
# endif /* JS_CODEGEN_RISCV64 */
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
    case RO_FCVT_D_W: {  // RO_FCVT_D_WU , 64F RO_FCVT_D_L RO_FCVT_D_LU
      // Integer -> double conversions; rs2 encodes the source width/sign.
      switch (instr_.Rs2Value()) {
        case 0b00000: {  // RO_FCVT_D_W
          set_drd((int32_t)rs1());
          break;
        }
        case 0b00001: {  // RO_FCVT_D_WU
          set_drd((uint32_t)rs1());
          break;
        }
# ifdef JS_CODEGEN_RISCV64
        case 0b00010: {  // RO_FCVT_D_L
          set_drd((int64_t)rs1());
          break;
        }
        case 0b00011: {  // RO_FCVT_D_LU
          set_drd((uint64_t)rs1());
          break;
        }
# endif /* JS_CODEGEN_RISCV64 */
        default: {
          UNSUPPORTED();
        }
      }
      break;
    }
# ifdef JS_CODEGEN_RISCV64
    case RO_FMV_D_X: {
      if (instr_.Funct3Value() == 0b000 && instr_.Rs2Value() == 0b00000) {
        // Since FMV preserves the source bit pattern, no need to canonicalize.
        set_drd(bit_cast<double>(rs1()));
      } else {
        UNSUPPORTED();
      }
      break;
    }
# endif /* JS_CODEGEN_RISCV64 */
    default: {
      UNSUPPORTED();
    }
  }
}
+
void Simulator::DecodeRVR4Type() {
  // Decode and execute the R4-format fused multiply-add family
  // (FMADD/FMSUB/FNMSUB/FNMADD, single and double precision). Each case
  // flags invalid operations (0*inf, or inf-inf in the accumulate step)
  // before computing a fused result via std::fma.
  switch (instr_.InstructionBits() & kR4TypeMask) {
    // TODO(riscv): use F Extension macro block
    case RO_FMADD_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2, float frs3) {
        if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else {
          return std::fma(frs1, frs2, frs3);
        }
      };
      set_frd(CanonicalizeFPUOp3<float>(fn));
      break;
    }
    case RO_FMSUB_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2, float frs3) {
        if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs1 * frs2, frs3)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else {
          return std::fma(frs1, frs2, -frs3);
        }
      };
      set_frd(CanonicalizeFPUOp3<float>(fn));
      break;
    }
    case RO_FNMSUB_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2, float frs3) {
        if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs3, frs1 * frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else {
          // FNMSUB computes -(rs1*rs2) + rs3.
          return -std::fma(frs1, frs2, -frs3);
        }
      };
      set_frd(CanonicalizeFPUOp3<float>(fn));
      break;
    }
    case RO_FNMADD_S: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](float frs1, float frs2, float frs3) {
        if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else {
          // FNMADD computes -(rs1*rs2) - rs3.
          return -std::fma(frs1, frs2, frs3);
        }
      };
      set_frd(CanonicalizeFPUOp3<float>(fn));
      break;
    }
    // TODO(riscv): use D Extension macro block
    case RO_FMADD_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2, double drs3) {
        if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else {
          return std::fma(drs1, drs2, drs3);
        }
      };
      set_drd(CanonicalizeFPUOp3<double>(fn));
      break;
    }
    case RO_FMSUB_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2, double drs3) {
        if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs1 * drs2, drs3)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else {
          return std::fma(drs1, drs2, -drs3);
        }
      };
      set_drd(CanonicalizeFPUOp3<double>(fn));
      break;
    }
    case RO_FNMSUB_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2, double drs3) {
        if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs3, drs1 * drs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else {
          return -std::fma(drs1, drs2, -drs3);
        }
      };
      set_drd(CanonicalizeFPUOp3<double>(fn));
      break;
    }
    case RO_FNMADD_D: {
      // TODO(riscv): use rm value (round mode)
      auto fn = [this](double drs1, double drs2, double drs3) {
        if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else {
          return -std::fma(drs1, drs2, drs3);
        }
      };
      set_drd(CanonicalizeFPUOp3<double>(fn));
      break;
    }
    default:
      UNSUPPORTED();
  }
}
+
+# ifdef CAN_USE_RVV_INSTRUCTIONS
bool Simulator::DecodeRvvVL() {
  // Attempt to decode an RVV vector-load instruction. Returns true if the
  // instruction belongs to the vector-load space (even when the specific
  // form is not yet implemented), false so the caller can report an
  // unsupported I-type instruction. Only unit-stride VL with vm=1 (the
  // kRvvRs2Mask bits clear) is currently implemented.
  uint32_t instr_temp =
      instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
  if (RO_V_VL == instr_temp) {
    if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
      // Element width (SEW) selects the load template instantiation.
      switch (instr_.vl_vs_width()) {
        case 8: {
          RVV_VI_LD(0, (i * nf + fn), int8, false);
          break;
        }
        case 16: {
          RVV_VI_LD(0, (i * nf + fn), int16, false);
          break;
        }
        case 32: {
          RVV_VI_LD(0, (i * nf + fn), int32, false);
          break;
        }
        case 64: {
          RVV_VI_LD(0, (i * nf + fn), int64, false);
          break;
        }
        default:
          UNIMPLEMENTED_RISCV();
          break;
      }
      return true;
    } else {
      UNIMPLEMENTED_RISCV();
      return true;
    }
  } else if (RO_V_VLS == instr_temp) {
    // Strided loads: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VLX == instr_temp) {
    // Indexed loads: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
             RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
             RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
             RO_V_VLSEG8 == instr_temp) {
    // Segment loads: not implemented.
    if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
      UNIMPLEMENTED_RISCV();
      return true;
    } else {
      UNIMPLEMENTED_RISCV();
      return true;
    }
  } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
             RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
             RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
             RO_V_VLSSEG8 == instr_temp) {
    // Strided segment loads: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
             RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
             RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
             RO_V_VLXSEG8 == instr_temp) {
    // Indexed segment loads: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else {
    // Not a vector load; let the caller try other decodings.
    return false;
  }
}
+
bool Simulator::DecodeRvvVS() {
  // Attempt to decode an RVV vector-store instruction. Returns true if the
  // instruction belongs to the vector-store space (even when the specific
  // form is not yet implemented), false so the caller can report an
  // unsupported S-type instruction. Only unit-stride VS with vm=1 (the
  // kRvvRs2Mask bits clear) is currently implemented.
  uint32_t instr_temp =
      instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
  if (RO_V_VS == instr_temp) {
    if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
      // Element width (SEW) selects the store template instantiation.
      switch (instr_.vl_vs_width()) {
        case 8: {
          RVV_VI_ST(0, (i * nf + fn), uint8, false);
          break;
        }
        case 16: {
          RVV_VI_ST(0, (i * nf + fn), uint16, false);
          break;
        }
        case 32: {
          RVV_VI_ST(0, (i * nf + fn), uint32, false);
          break;
        }
        case 64: {
          RVV_VI_ST(0, (i * nf + fn), uint64, false);
          break;
        }
        default:
          UNIMPLEMENTED_RISCV();
          break;
      }
    } else {
      UNIMPLEMENTED_RISCV();
    }
    return true;
  } else if (RO_V_VSS == instr_temp) {
    // Strided stores: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VSX == instr_temp) {
    // Indexed stores: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VSU == instr_temp) {
    // Unit-stride, mask-agnostic stores: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
             RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
             RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
             RO_V_VSSEG8 == instr_temp) {
    // Segment stores: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
             RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
             RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
             RO_V_VSSSEG8 == instr_temp) {
    // Strided segment stores: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
             RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
             RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
             RO_V_VSXSEG8 == instr_temp) {
    // Indexed segment stores: not implemented.
    UNIMPLEMENTED_RISCV();
    return true;
  } else {
    // Not a vector store; let the caller try other decodings.
    return false;
  }
}
+# endif
+
void Simulator::DecodeRVIType() {
  // Decode and execute I-format instructions: JALR, scalar loads,
  // immediate ALU ops, fences, ECALL/EBREAK, Zicsr CSR accesses and the
  // F/D FLW/FLD loads. Falls back to the RVV vector-load decoder when
  // compiled with RVV support.
  switch (instr_.InstructionBits() & kITypeMask) {
    case RO_JALR: {
      // Link register gets the return address before the jump.
      set_rd(get_pc() + kInstrSize);
      // Note: No need to shift 2 for JALR's imm12, but set lowest bit to 0.
      sreg_t next_pc = (rs1() + imm12()) & ~sreg_t(1);
      set_pc(next_pc);
      break;
    }
    case RO_LB: {
      sreg_t addr = rs1() + imm12();
      int8_t val = ReadMem<int8_t>(addr, instr_.instr());
      set_rd(sext_xlen(val), false);
      TraceMemRd(addr, val, getRegister(rd_reg()));
      break;
    }
    case RO_LH: {
      sreg_t addr = rs1() + imm12();
      int16_t val = ReadMem<int16_t>(addr, instr_.instr());
      set_rd(sext_xlen(val), false);
      TraceMemRd(addr, val, getRegister(rd_reg()));
      break;
    }
    case RO_LW: {
      sreg_t addr = rs1() + imm12();
      int32_t val = ReadMem<int32_t>(addr, instr_.instr());
      set_rd(sext_xlen(val), false);
      TraceMemRd(addr, val, getRegister(rd_reg()));
      break;
    }
    case RO_LBU: {
      sreg_t addr = rs1() + imm12();
      uint8_t val = ReadMem<uint8_t>(addr, instr_.instr());
      set_rd(zext_xlen(val), false);
      TraceMemRd(addr, val, getRegister(rd_reg()));
      break;
    }
    case RO_LHU: {
      sreg_t addr = rs1() + imm12();
      uint16_t val = ReadMem<uint16_t>(addr, instr_.instr());
      set_rd(zext_xlen(val), false);
      TraceMemRd(addr, val, getRegister(rd_reg()));
      break;
    }
# ifdef JS_CODEGEN_RISCV64
    case RO_LWU: {
      int64_t addr = rs1() + imm12();
      uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
      set_rd(zext_xlen(val), false);
      TraceMemRd(addr, val, getRegister(rd_reg()));
      break;
    }
    case RO_LD: {
      int64_t addr = rs1() + imm12();
      int64_t val = ReadMem<int64_t>(addr, instr_.instr());
      set_rd(sext_xlen(val), false);
      TraceMemRd(addr, val, getRegister(rd_reg()));
      break;
    }
# endif /*JS_CODEGEN_RISCV64*/
    case RO_ADDI: {
      set_rd(sext_xlen(rs1() + imm12()));
      break;
    }
    case RO_SLTI: {
      set_rd(sreg_t(rs1()) < sreg_t(imm12()));
      break;
    }
    case RO_SLTIU: {
      // Unsigned compare after sign-extending the immediate, per spec.
      set_rd(reg_t(rs1()) < reg_t(imm12()));
      break;
    }
    case RO_XORI: {
      set_rd(imm12() ^ rs1());
      break;
    }
    case RO_ORI: {
      set_rd(imm12() | rs1());
      break;
    }
    case RO_ANDI: {
      set_rd(imm12() & rs1());
      break;
    }
    case RO_SLLI: {
      require(shamt6() < xlen);
      set_rd(sext_xlen(rs1() << shamt6()));
      break;
    }
    case RO_SRLI: {  //  RO_SRAI
      // Bit 30 distinguishes logical (zero-fill) from arithmetic
      // (sign-fill) right shifts.
      if (!instr_.IsArithShift()) {
        require(shamt6() < xlen);
        set_rd(sext_xlen(zext_xlen(rs1()) >> shamt6()));
      } else {
        require(shamt6() < xlen);
        set_rd(sext_xlen(sext_xlen(rs1()) >> shamt6()));
      }
      break;
    }
# ifdef JS_CODEGEN_RISCV64
    case RO_ADDIW: {
      set_rd(sext32(rs1() + imm12()));
      break;
    }
    case RO_SLLIW: {
      set_rd(sext32(rs1() << shamt5()));
      break;
    }
    case RO_SRLIW: {  //  RO_SRAIW
      if (!instr_.IsArithShift()) {
        set_rd(sext32(uint32_t(rs1()) >> shamt5()));
      } else {
        set_rd(sext32(int32_t(rs1()) >> shamt5()));
      }
      break;
    }
# endif /*JS_CODEGEN_RISCV64*/
    case RO_FENCE: {
      // Do nothing in the simulator: it is sequentially consistent.
      break;
    }
    case RO_ECALL: {  // RO_EBREAK
      if (instr_.Imm12Value() == 0) {  // ECALL
        SoftwareInterrupt();
      } else if (instr_.Imm12Value() == 1) {  // EBREAK
        // An EBREAK carrying the wasm trap code may be a trap the wasm
        // runtime can handle in place; otherwise fall through to the
        // generic software interrupt.
        uint8_t code = get_ebreak_code(instr_.instr());
        if (code == kWasmTrapCode) {
          uint8_t* newPC;
          if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
            set_pc(int64_t(newPC));
            return;
          }
        }
        SoftwareInterrupt();
      } else {
        UNSUPPORTED();
      }
      break;
    }
    // TODO(riscv): use Zifencei Standard Extension macro block
    case RO_FENCE_I: {
      // spike: flush icache. Nothing to do here.
      break;
    }
    // TODO(riscv): use Zicsr Standard Extension macro block
    case RO_CSRRW: {
      // CSRRW reads the CSR only when rd != x0 (per spec), but always
      // writes it.
      if (rd_reg() != zero_reg) {
        set_rd(zext_xlen(read_csr_value(csr_reg())));
      }
      write_csr_value(csr_reg(), rs1());
      break;
    }
    case RO_CSRRS: {
      // CSRRS/CSRRC always read; they modify only when rs1 != x0.
      set_rd(zext_xlen(read_csr_value(csr_reg())));
      if (rs1_reg() != zero_reg) {
        set_csr_bits(csr_reg(), rs1());
      }
      break;
    }
    case RO_CSRRC: {
      set_rd(zext_xlen(read_csr_value(csr_reg())));
      if (rs1_reg() != zero_reg) {
        clear_csr_bits(csr_reg(), rs1());
      }
      break;
    }
    case RO_CSRRWI: {
      if (rd_reg() != zero_reg) {
        set_rd(zext_xlen(read_csr_value(csr_reg())));
      }
      write_csr_value(csr_reg(), imm5CSR());
      break;
    }
    case RO_CSRRSI: {
      // Immediate forms modify only when the 5-bit immediate is nonzero.
      set_rd(zext_xlen(read_csr_value(csr_reg())));
      if (imm5CSR() != 0) {
        set_csr_bits(csr_reg(), imm5CSR());
      }
      break;
    }
    case RO_CSRRCI: {
      set_rd(zext_xlen(read_csr_value(csr_reg())));
      if (imm5CSR() != 0) {
        clear_csr_bits(csr_reg(), imm5CSR());
      }
      break;
    }
    // TODO(riscv): use F Extension macro block
    case RO_FLW: {
      sreg_t addr = rs1() + imm12();
      uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
      set_frd(Float32::FromBits(val), false);
      TraceMemRdFloat(addr, Float32::FromBits(val), getFpuRegister(frd_reg()));
      break;
    }
    // TODO(riscv): use D Extension macro block
    case RO_FLD: {
      sreg_t addr = rs1() + imm12();
      uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
      set_drd(Float64::FromBits(val), false);
      TraceMemRdDouble(addr, Float64::FromBits(val), getFpuRegister(frd_reg()));
      break;
    }
    default: {
# ifdef CAN_USE_RVV_INSTRUCTIONS
      if (!DecodeRvvVL()) {
        UNSUPPORTED();
      }
      break;
# else
      UNSUPPORTED();
# endif
    }
  }
}
+
+// Decode and execute an S-type (store) instruction. The effective address is
+// rs1 plus the sign-extended S-format immediate; rs2 supplies the data.
+void Simulator::DecodeRVSType() {
+  switch (instr_.InstructionBits() & kSTypeMask) {
+    case RO_SB:
+      WriteMem<uint8_t>(rs1() + s_imm12(), (uint8_t)rs2(), instr_.instr());
+      break;
+    case RO_SH:
+      WriteMem<uint16_t>(rs1() + s_imm12(), (uint16_t)rs2(), instr_.instr());
+      break;
+    case RO_SW:
+      WriteMem<uint32_t>(rs1() + s_imm12(), (uint32_t)rs2(), instr_.instr());
+      break;
+# ifdef JS_CODEGEN_RISCV64
+    case RO_SD:
+      WriteMem<uint64_t>(rs1() + s_imm12(), (uint64_t)rs2(), instr_.instr());
+      break;
+# endif /*JS_CODEGEN_RISCV64*/
+    // TODO(riscv): use F Extension macro block
+    case RO_FSW: {
+      // Store the raw 32-bit FP bit pattern (Float32 preserves NaN payloads).
+      WriteMem<Float32>(rs1() + s_imm12(), getFpuRegisterFloat32(rs2_reg()),
+                        instr_.instr());
+      break;
+    }
+    // TODO(riscv): use D Extension macro block
+    case RO_FSD: {
+      WriteMem<Float64>(rs1() + s_imm12(), getFpuRegisterFloat64(rs2_reg()),
+                        instr_.instr());
+      break;
+    }
+    default:
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+      // Not a scalar store: try the vector-store decoder before giving up.
+      if (!DecodeRvvVS()) {
+        UNSUPPORTED();
+      }
+      break;
+# else
+      UNSUPPORTED();
+# endif
+  }
+}
+
+// Decode and execute a B-type (conditional branch) instruction. On a taken
+// branch the pc is advanced by the sign-extended B-format offset; otherwise
+// execution falls through to the next instruction.
+void Simulator::DecodeRVBType() {
+  switch (instr_.InstructionBits() & kBTypeMask) {
+    case RO_BEQ:
+      if (rs1() == rs2()) {
+        int64_t next_pc = get_pc() + boffset();
+        set_pc(next_pc);
+      }
+      break;
+    case RO_BNE:
+      if (rs1() != rs2()) {
+        int64_t next_pc = get_pc() + boffset();
+        set_pc(next_pc);
+      }
+      break;
+    // blt/bge compare as signed values (sreg_t).
+    case RO_BLT:
+      if (rs1() < rs2()) {
+        int64_t next_pc = get_pc() + boffset();
+        set_pc(next_pc);
+      }
+      break;
+    case RO_BGE:
+      if (rs1() >= rs2()) {
+        int64_t next_pc = get_pc() + boffset();
+        set_pc(next_pc);
+      }
+      break;
+    // bltu/bgeu compare as unsigned values (reg_t).
+    case RO_BLTU:
+      if ((reg_t)rs1() < (reg_t)rs2()) {
+        int64_t next_pc = get_pc() + boffset();
+        set_pc(next_pc);
+      }
+      break;
+    case RO_BGEU:
+      if ((reg_t)rs1() >= (reg_t)rs2()) {
+        int64_t next_pc = get_pc() + boffset();
+        set_pc(next_pc);
+      }
+      break;
+    default:
+      UNSUPPORTED();
+  }
+}
+// Decode and execute a U-type instruction (lui/auipc).
+void Simulator::DecodeRVUType() {
+  // U Type doesn't have additional mask
+  switch (instr_.BaseOpcodeFieldRaw()) {
+    case LUI:
+      // rd = upper immediate (u_imm20() presumably returns the immediate
+      // already shifted into the upper bits — see InstructionGetters).
+      set_rd(u_imm20());
+      break;
+    case AUIPC:
+      // rd = pc + upper immediate, truncated/sign-extended to xlen.
+      set_rd(sext_xlen(u_imm20() + get_pc()));
+      break;
+    default:
+      UNSUPPORTED();
+  }
+}
+// Decode and execute a J-type instruction (jal).
+void Simulator::DecodeRVJType() {
+  // J Type doesn't have additional mask
+  if (instr_.BaseOpcodeValue() == JAL) {
+    // Link: rd receives the address of the following instruction.
+    set_rd(get_pc() + kInstrSize);
+    // Jump pc-relative by the sign-extended J-format immediate.
+    set_pc(get_pc() + imm20J());
+  } else {
+    UNSUPPORTED();
+  }
+}
+// Decode and execute a compressed CR-format instruction:
+// funct4 == 0b1000 -> c.jr / c.mv, funct4 == 0b1001 -> c.ebreak / c.jalr /
+// c.add. The operand-zero patterns distinguish the variants.
+void Simulator::DecodeCRType() {
+  switch (instr_.RvcFunct4Value()) {
+    case 0b1000:
+      if (instr_.RvcRs1Value() != 0 && instr_.RvcRs2Value() == 0) {  // c.jr
+        set_pc(rvc_rs1());
+      } else if (instr_.RvcRdValue() != 0 &&
+                 instr_.RvcRs2Value() != 0) {  // c.mv
+        set_rvc_rd(sext_xlen(rvc_rs2()));
+      } else {
+        UNSUPPORTED();
+      }
+      break;
+    case 0b1001:
+      if (instr_.RvcRs1Value() == 0 && instr_.RvcRs2Value() == 0) {  // c.ebreak
+        DieOrDebug();
+      } else if (instr_.RvcRdValue() != 0 &&
+                 instr_.RvcRs2Value() == 0) {  // c.jalr
+        // Link to ra with the 2-byte (compressed) instruction size.
+        setRegister(ra, get_pc() + kShortInstrSize);
+        set_pc(rvc_rs1());
+      } else if (instr_.RvcRdValue() != 0 &&
+                 instr_.RvcRs2Value() != 0) {  // c.add
+        set_rvc_rd(sext_xlen(rvc_rs1() + rvc_rs2()));
+      } else {
+        UNSUPPORTED();
+      }
+      break;
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CA-format (register-register ALU)
+// instruction. rs1' is both source and destination.
+void Simulator::DecodeCAType() {
+  switch (instr_.InstructionBits() & kCATypeMask) {
+    case RO_C_SUB:
+      set_rvc_rs1s(sext_xlen(rvc_rs1s() - rvc_rs2s()));
+      break;
+    case RO_C_XOR:
+      set_rvc_rs1s(rvc_rs1s() ^ rvc_rs2s());
+      break;
+    case RO_C_OR:
+      set_rvc_rs1s(rvc_rs1s() | rvc_rs2s());
+      break;
+    case RO_C_AND:
+      set_rvc_rs1s(rvc_rs1s() & rvc_rs2s());
+      break;
+# if JS_CODEGEN_RISCV64
+    // 32-bit word variants: result is sign-extended from bit 31.
+    case RO_C_SUBW:
+      set_rvc_rs1s(sext32(rvc_rs1s() - rvc_rs2s()));
+      break;
+    case RO_C_ADDW:
+      set_rvc_rs1s(sext32(rvc_rs1s() + rvc_rs2s()));
+      break;
+# endif
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CI-format instruction (immediate ALU ops
+// and stack-pointer-relative loads).
+void Simulator::DecodeCIType() {
+  switch (instr_.RvcOpcode()) {
+    case RO_C_NOP_ADDI:
+      if (instr_.RvcRdValue() == 0) // c.nop
+        break;
+      else // c.addi
+        set_rvc_rd(sext_xlen(rvc_rs1() + rvc_imm6()));
+      break;
+# if JS_CODEGEN_RISCV64
+    case RO_C_ADDIW:
+      set_rvc_rd(sext32(rvc_rs1() + rvc_imm6()));
+      break;
+# endif
+    case RO_C_LI:
+      set_rvc_rd(sext_xlen(rvc_imm6()));
+      break;
+    case RO_C_LUI_ADD:
+      // rd == x2 encodes c.addi16sp; any other nonzero rd encodes c.lui.
+      if (instr_.RvcRdValue() == 2) {
+        // c.addi16sp
+        int64_t value = getRegister(sp) + rvc_imm6_addi16sp();
+        setRegister(sp, value);
+      } else if (instr_.RvcRdValue() != 0 && instr_.RvcRdValue() != 2) {
+        // c.lui
+        set_rvc_rd(rvc_u_imm6());
+      } else {
+        UNSUPPORTED();
+      }
+      break;
+    case RO_C_SLLI:
+      set_rvc_rd(sext_xlen(rvc_rs1() << rvc_shamt6()));
+      break;
+    case RO_C_FLDSP: {
+      sreg_t addr = getRegister(sp) + rvc_imm6_ldsp();
+      uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
+      set_rvc_drd(Float64::FromBits(val), false);
+      TraceMemRdDouble(addr, Float64::FromBits(val),
+                       getFpuRegister(rvc_frd_reg()));
+      break;
+    }
+# if JS_CODEGEN_RISCV64
+    case RO_C_LWSP: {
+      sreg_t addr = getRegister(sp) + rvc_imm6_lwsp();
+      int64_t val = ReadMem<int32_t>(addr, instr_.instr());
+      set_rvc_rd(sext_xlen(val), false);
+      TraceMemRd(addr, val, getRegister(rvc_rd_reg()));
+      break;
+    }
+    case RO_C_LDSP: {
+      sreg_t addr = getRegister(sp) + rvc_imm6_ldsp();
+      int64_t val = ReadMem<int64_t>(addr, instr_.instr());
+      set_rvc_rd(sext_xlen(val), false);
+      TraceMemRd(addr, val, getRegister(rvc_rd_reg()));
+      break;
+    }
+# elif JS_CODEGEN_RISCV32
+    case RO_C_FLWSP: {
+      // NOTE(review): this uses the doubleword-scaled offset
+      // (rvc_imm6_ldsp) for a 32-bit FP load — confirm it should not be the
+      // word-scaled rvc_imm6_lwsp.
+      sreg_t addr = getRegister(sp) + rvc_imm6_ldsp();
+      uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
+      set_rvc_frd(Float32::FromBits(val), false);
+      TraceMemRdFloat(addr, Float32::FromBits(val),
+                      getFpuRegister(rvc_frd_reg()));
+      break;
+    }
+    case RO_C_LWSP: {
+      sreg_t addr = getRegister(sp) + rvc_imm6_lwsp();
+      int32_t val = ReadMem<int32_t>(addr, instr_.instr());
+      set_rvc_rd(sext_xlen(val), false);
+      TraceMemRd(addr, val, getRegister(rvc_rd_reg()));
+      break;
+    }
+# endif
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CIW-format instruction
+// (c.addi4spn: rd' = sp + zero-extended, scaled immediate).
+// Fix: the original nested the `default:` label inside the case's compound
+// statement — legal C++ but misleading and fragile; it now sits at switch
+// scope. Behavior is unchanged.
+void Simulator::DecodeCIWType() {
+  switch (instr_.RvcOpcode()) {
+    case RO_C_ADDI4SPN: {
+      set_rvc_rs2s(getRegister(sp) + rvc_imm8_addi4spn());
+      break;
+    }
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CSS-format (stack-pointer-relative store)
+// instruction.
+void Simulator::DecodeCSSType() {
+  switch (instr_.RvcOpcode()) {
+    case RO_C_FSDSP: {
+      sreg_t addr = getRegister(sp) + rvc_imm6_sdsp();
+      WriteMem<Float64>(addr, getFpuRegisterFloat64(rvc_rs2_reg()),
+                        instr_.instr());
+      break;
+    }
+# if JS_CODEGEN_RISCV32
+    case RO_C_FSWSP: {
+      // NOTE(review): uses the doubleword-scaled offset (rvc_imm6_sdsp) for
+      // a 32-bit FP store — confirm it should not be the word-scaled
+      // rvc_imm6_swsp.
+      sreg_t addr = getRegister(sp) + rvc_imm6_sdsp();
+      WriteMem<Float32>(addr, getFpuRegisterFloat32(rvc_rs2_reg()),
+                        instr_.instr());
+      break;
+    }
+# endif
+    case RO_C_SWSP: {
+      sreg_t addr = getRegister(sp) + rvc_imm6_swsp();
+      WriteMem<int32_t>(addr, (int32_t)rvc_rs2(), instr_.instr());
+      break;
+    }
+# if JS_CODEGEN_RISCV64
+    case RO_C_SDSP: {
+      sreg_t addr = getRegister(sp) + rvc_imm6_sdsp();
+      WriteMem<int64_t>(addr, (int64_t)rvc_rs2(), instr_.instr());
+      break;
+    }
+# endif
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CL-format (register-relative load)
+// instruction. The base register is rs1'; the destination is rs2'/frs2'.
+void Simulator::DecodeCLType() {
+  switch (instr_.RvcOpcode()) {
+    case RO_C_LW: {
+      sreg_t addr = rvc_rs1s() + rvc_imm5_w();
+      int64_t val = ReadMem<int32_t>(addr, instr_.instr());
+      set_rvc_rs2s(sext_xlen(val), false);
+      TraceMemRd(addr, val, getRegister(rvc_rs2s_reg()));
+      break;
+    }
+    case RO_C_FLD: {
+      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+      uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
+      set_rvc_drs2s(Float64::FromBits(val), false);
+      break;
+    }
+# if JS_CODEGEN_RISCV64
+    case RO_C_LD: {
+      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+      int64_t val = ReadMem<int64_t>(addr, instr_.instr());
+      set_rvc_rs2s(sext_xlen(val), false);
+      TraceMemRd(addr, val, getRegister(rvc_rs2s_reg()));
+      break;
+    }
+# elif JS_CODEGEN_RISCV32
+    case RO_C_FLW: {
+      // NOTE(review): uses the doubleword-scaled offset (rvc_imm5_d) for a
+      // 32-bit FP load — confirm it should not be the word-scaled rvc_imm5_w.
+      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+      uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
+      set_rvc_frs2s(Float32::FromBits(val), false);
+      break;
+    }
+# endif
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CS-format (register-relative store)
+// instruction.
+void Simulator::DecodeCSType() {
+  switch (instr_.RvcOpcode()) {
+    case RO_C_SW: {
+      sreg_t addr = rvc_rs1s() + rvc_imm5_w();
+      WriteMem<int32_t>(addr, (int32_t)rvc_rs2s(), instr_.instr());
+      break;
+    }
+# if JS_CODEGEN_RISCV64
+    case RO_C_SD: {
+      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+      WriteMem<int64_t>(addr, (int64_t)rvc_rs2s(), instr_.instr());
+      break;
+    }
+# endif
+    case RO_C_FSD: {
+      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
+      // NOTE(review): unlike RO_FSD in DecodeRVSType (which stores the raw
+      // Float64 bits), this round-trips through `double`, which may not
+      // preserve signaling-NaN bit patterns — verify against the RO_FSD path.
+      WriteMem<double>(addr, static_cast<double>(rvc_drs2s()), instr_.instr());
+      break;
+    }
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CJ-format instruction (c.j).
+void Simulator::DecodeCJType() {
+  if (instr_.RvcOpcode() == RO_C_J) {
+    // Unconditional pc-relative jump; no link register is written.
+    set_pc(get_pc() + instr_.RvcImm11CJValue());
+  } else {
+    UNSUPPORTED();
+  }
+}
+
+// Decode and execute a compressed CB-format instruction: conditional branches
+// (c.beqz/c.bnez) and the immediate ALU group (c.srli/c.srai/c.andi).
+void Simulator::DecodeCBType() {
+  switch (instr_.RvcOpcode()) {
+    case RO_C_BNEZ:
+      if (rvc_rs1() != 0) {
+        sreg_t next_pc = get_pc() + rvc_imm8_b();
+        set_pc(next_pc);
+      }
+      break;
+    case RO_C_BEQZ:
+      if (rvc_rs1() == 0) {
+        sreg_t next_pc = get_pc() + rvc_imm8_b();
+        set_pc(next_pc);
+      }
+      break;
+    case RO_C_MISC_ALU:
+      if (instr_.RvcFunct2BValue() == 0b00) {  // c.srli
+        // Fix: c.srli is a *logical* right shift, so the operand must be
+        // zero-extended before shifting (mirrors the RO_SRLI handling).
+        // The original sign-extended here, duplicating c.srai, and also
+        // omitted the shift-amount range check.
+        require(rvc_shamt6() < xlen);
+        set_rvc_rs1s(sext_xlen(zext_xlen(rvc_rs1s()) >> rvc_shamt6()));
+      } else if (instr_.RvcFunct2BValue() == 0b01) {  // c.srai
+        require(rvc_shamt6() < xlen);
+        set_rvc_rs1s(sext_xlen(sext_xlen(rvc_rs1s()) >> rvc_shamt6()));
+      } else if (instr_.RvcFunct2BValue() == 0b10) {  // c.andi
+        set_rvc_rs1s(rvc_imm6() & rvc_rs1s());
+      } else {
+        UNSUPPORTED();
+      }
+      break;
+    default:
+      UNSUPPORTED();
+  }
+}
+
+// Run simulated code starting at `entry` until it returns to the sentinel
+// return address. Callee-saved registers are poisoned with a known value and
+// checked afterwards to verify the simulated code preserved them.
+void Simulator::callInternal(uint8_t* entry) {
+  // Prepare to execute the code at entry.
+  setRegister(pc, reinterpret_cast<int64_t>(entry));
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // the LR the simulation stops when returning to this call point.
+  setRegister(ra, end_sim_pc);
+  // Remember the values of callee-saved registers.
+  intptr_t s0_val = getRegister(Simulator::Register::fp);
+  intptr_t s1_val = getRegister(Simulator::Register::s1);
+  intptr_t s2_val = getRegister(Simulator::Register::s2);
+  intptr_t s3_val = getRegister(Simulator::Register::s3);
+  intptr_t s4_val = getRegister(Simulator::Register::s4);
+  intptr_t s5_val = getRegister(Simulator::Register::s5);
+  intptr_t s6_val = getRegister(Simulator::Register::s6);
+  intptr_t s7_val = getRegister(Simulator::Register::s7);
+  intptr_t s8_val = getRegister(Simulator::Register::s8);
+  intptr_t s9_val = getRegister(Simulator::Register::s9);
+  intptr_t s10_val = getRegister(Simulator::Register::s10);
+  intptr_t s11_val = getRegister(Simulator::Register::s11);
+  intptr_t gp_val = getRegister(Simulator::Register::gp);
+  intptr_t sp_val = getRegister(Simulator::Register::sp);
+
+  // Set up the callee-saved registers with a known value. To be able to check
+  // that they are preserved properly across JS execution. If this value is
+  // small int, it should be SMI.
+  intptr_t callee_saved_value = icount_;
+  setRegister(Simulator::Register::fp, callee_saved_value);
+  setRegister(Simulator::Register::s1, callee_saved_value);
+  setRegister(Simulator::Register::s2, callee_saved_value);
+  setRegister(Simulator::Register::s3, callee_saved_value);
+  setRegister(Simulator::Register::s4, callee_saved_value);
+  setRegister(Simulator::Register::s5, callee_saved_value);
+  setRegister(Simulator::Register::s6, callee_saved_value);
+  setRegister(Simulator::Register::s7, callee_saved_value);
+  setRegister(Simulator::Register::s8, callee_saved_value);
+  setRegister(Simulator::Register::s9, callee_saved_value);
+  setRegister(Simulator::Register::s10, callee_saved_value);
+  setRegister(Simulator::Register::s11, callee_saved_value);
+  setRegister(Simulator::Register::gp, callee_saved_value);
+
+  // Start the simulation. The template parameter selects whether the
+  // per-instruction stop check (Simulator::StopSimAt) is compiled in.
+  if (Simulator::StopSimAt != -1) {
+    execute<true>();
+  } else {
+    execute<false>();
+  }
+
+  // Check that the callee-saved registers have been preserved.
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::fp));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s1));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s2));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s3));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s4));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s5));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s6));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s7));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s8));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s9));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s10));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::s11));
+  MOZ_ASSERT(callee_saved_value == getRegister(Simulator::Register::gp));
+
+  // Restore callee-saved registers with the original value.
+  setRegister(Simulator::Register::fp, s0_val);
+  setRegister(Simulator::Register::s1, s1_val);
+  setRegister(Simulator::Register::s2, s2_val);
+  setRegister(Simulator::Register::s3, s3_val);
+  setRegister(Simulator::Register::s4, s4_val);
+  setRegister(Simulator::Register::s5, s5_val);
+  setRegister(Simulator::Register::s6, s6_val);
+  setRegister(Simulator::Register::s7, s7_val);
+  setRegister(Simulator::Register::s8, s8_val);
+  setRegister(Simulator::Register::s9, s9_val);
+  setRegister(Simulator::Register::s10, s10_val);
+  setRegister(Simulator::Register::s11, s11_val);
+  setRegister(Simulator::Register::gp, gp_val);
+  setRegister(Simulator::Register::sp, sp_val);
+}
+
+// Call simulated code at `entry` with `argument_count` int64 arguments.
+// The first arguments go in the integer argument registers; the remainder
+// are spilled to an ABI-aligned stack area. Returns the value left in a0.
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+
+  int64_t original_stack = getRegister(sp);
+  // Compute position of stack on entry to generated code.
+  int64_t entry_stack = original_stack;
+  if (argument_count > kCArgSlotCount) {
+    entry_stack = entry_stack - argument_count * sizeof(int64_t);
+  } else {
+    entry_stack = entry_stack - kCArgsSlotsSize;
+  }
+
+  // Round down to the ABI stack alignment.
+  entry_stack &= ~U64(ABIStackAlignment - 1);
+
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+  // Setup the arguments.
+  for (int i = 0; i < argument_count; i++) {
+    js::jit::Register argReg;
+    if (GetIntArgReg(i, &argReg)) {
+      setRegister(argReg.code(), va_arg(parameters, int64_t));
+    } else {
+      // Slot i is used directly, leaving the register-arg slots unused,
+      // matching the C ABI's shadow slots.
+      stack_argument[i] = va_arg(parameters, int64_t);
+    }
+  }
+
+  va_end(parameters);
+  setRegister(sp, entry_stack);
+
+  callInternal(entry);
+
+  // Pop stack passed arguments.
+  MOZ_ASSERT(entry_stack == getRegister(sp));
+  setRegister(sp, original_stack);
+
+  int64_t result = getRegister(a0);
+  return result;
+}
+
+// Push `address` onto the simulated stack and return the new stack pointer.
+// Fix: the stack pointer was previously held in an `int`, truncating the
+// 64-bit simulated sp (and the returned address) to 32 bits.
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+  int64_t new_sp = getRegister(sp) - sizeof(uintptr_t);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+  *stack_slot = address;
+  setRegister(sp, new_sp);
+  return new_sp;
+}
+
+// Pop and return the address on top of the simulated stack.
+// Fix: the stack pointer was previously held in an `int`, truncating the
+// 64-bit simulated sp to 32 bits before it was dereferenced.
+uintptr_t Simulator::popAddress() {
+  int64_t current_sp = getRegister(sp);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+  uintptr_t address = *stack_slot;
+  setRegister(sp, current_sp + sizeof(uintptr_t));
+  return address;
+}
+
+} // namespace jit
+} // namespace js
+
+// Accessor for the per-context simulator instance (simulator builds only).
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
+
+#endif // JS_SIMULATOR_RISCV64
diff --git a/js/src/jit/riscv64/Simulator-riscv64.h b/js/src/jit/riscv64/Simulator-riscv64.h
new file mode 100644
index 0000000000..20a3f6e97c
--- /dev/null
+++ b/js/src/jit/riscv64/Simulator-riscv64.h
@@ -0,0 +1,1281 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_riscv64_Simulator_riscv64_h
+#define jit_riscv64_Simulator_riscv64_h
+
+#ifdef JS_SIMULATOR_RISCV64
+# include "mozilla/Atomics.h"
+
+# include <vector>
+
+# include "jit/IonTypes.h"
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/constant/util-riscv64.h"
+# include "jit/riscv64/disasm/Disasm-riscv64.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+// Reinterpret the bits of `source` as type Dest without invoking undefined
+// behavior (memcpy-based; pre-C++20 equivalent of std::bit_cast).
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "bit_cast requires source and destination to be the same size");
+  static_assert(std::is_trivially_copyable<Dest>::value,
+                "bit_cast requires the destination type to be copyable");
+  static_assert(std::is_trivially_copyable<Source>::value,
+                "bit_cast requires the source type to be copyable");
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+# define ASSERT_TRIVIALLY_COPYABLE(T) \
+  static_assert(std::is_trivially_copyable<T>::value, \
+                #T " should be trivially copyable")
+# define ASSERT_NOT_TRIVIALLY_COPYABLE(T) \
+  static_assert(!std::is_trivially_copyable<T>::value, \
+                #T " should not be trivially copyable")
+
+// Bit pattern of the engine's "hole" NaN; upper and lower words are
+// identical by construction.
+constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
+constexpr uint32_t kHoleNanLower32 = 0xFFF7FFFF;
+
+constexpr uint64_t kHoleNanInt64 =
+    (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
+// Safety wrapper for a 32-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value.
+class Float32 {
+ public:
+  Float32() = default;
+
+  // This constructor does not guarantee that bit pattern of the input value
+  // is preserved if the input is a NaN.
+  explicit Float32(float value) : bit_pattern_(bit_cast<uint32_t>(value)) {
+    // Check that the provided value is not a NaN, because the bit pattern of a
+    // NaN may be changed by a bit_cast, e.g. for signalling NaNs on
+    // ia32.
+    MOZ_ASSERT(!std::isnan(value));
+  }
+
+  uint32_t get_bits() const { return bit_pattern_; }
+
+  float get_scalar() const { return bit_cast<float>(bit_pattern_); }
+
+  bool is_nan() const {
+    // Even though {get_scalar()} might flip the quiet NaN bit, it's ok here,
+    // because this does not change the is_nan property.
+    return std::isnan(get_scalar());
+  }
+
+  // Return a pointer to the field storing the bit pattern. Used in code
+  // generation tests to store generated values there directly.
+  uint32_t* get_bits_address() { return &bit_pattern_; }
+
+  // Bit-exact factory; unlike Float32(float), this is safe for NaN payloads.
+  static constexpr Float32 FromBits(uint32_t bits) { return Float32(bits); }
+
+ private:
+  uint32_t bit_pattern_ = 0;
+
+  explicit constexpr Float32(uint32_t bit_pattern)
+      : bit_pattern_(bit_pattern) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Float32);
+
+// Safety wrapper for a 64-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value.
+// TODO(ahaas): Unify this class with Double in double.h
+class Float64 {
+ public:
+  Float64() = default;
+
+  // This constructor does not guarantee that bit pattern of the input value
+  // is preserved if the input is a NaN.
+  explicit Float64(double value) : bit_pattern_(bit_cast<uint64_t>(value)) {
+    // Check that the provided value is not a NaN, because the bit pattern of a
+    // NaN may be changed by a bit_cast, e.g. for signalling NaNs on
+    // ia32.
+    MOZ_ASSERT(!std::isnan(value));
+  }
+
+  uint64_t get_bits() const { return bit_pattern_; }
+  double get_scalar() const { return bit_cast<double>(bit_pattern_); }
+  // True iff this is exactly the engine's "hole" NaN bit pattern.
+  bool is_hole_nan() const { return bit_pattern_ == kHoleNanInt64; }
+  bool is_nan() const {
+    // Even though {get_scalar()} might flip the quiet NaN bit, it's ok here,
+    // because this does not change the is_nan property.
+    return std::isnan(get_scalar());
+  }
+
+  // Return a pointer to the field storing the bit pattern. Used in code
+  // generation tests to store generated values there directly.
+  uint64_t* get_bits_address() { return &bit_pattern_; }
+
+  // Bit-exact factory; unlike Float64(double), this is safe for NaN payloads.
+  static constexpr Float64 FromBits(uint64_t bits) { return Float64(bits); }
+
+ private:
+  uint64_t bit_pattern_ = 0;
+
+  explicit constexpr Float64(uint64_t bit_pattern)
+      : bit_pattern_(bit_pattern) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Float64);
+
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+const intptr_t kPointerAlignment = 8;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 33rd register
+// (index 32, directly after x0..x31).
+const int kPCRegister = 32;
+
+// Number of coprocessor (FPU) registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPUInvalidResult64 = static_cast<uint64_t>(1ULL << 63) - 1;
+
+// FCSR constants: per-exception flag bit positions...
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+// ...and the corresponding cause bit positions.
+const uint32_t kFCSRInexactCauseBit = 12;
+const uint32_t kFCSRUnderflowCauseBit = 13;
+const uint32_t kFCSROverflowCauseBit = 14;
+const uint32_t kFCSRDivideByZeroCauseBit = 15;
+const uint32_t kFCSRInvalidOpCauseBit = 16;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+    kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+    kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+// All exception flags except "inexact".
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// -----------------------------------------------------------------------------
+// Utility types and functions for RISCV
+# ifdef JS_CODEGEN_RISCV32
+using sreg_t = int32_t;
+using reg_t = uint32_t;
+using freg_t = uint64_t;
+using sfreg_t = int64_t;
+# elif JS_CODEGEN_RISCV64
+using sreg_t = int64_t;
+using reg_t = uint64_t;
+using freg_t = uint64_t;
+using sfreg_t = int64_t;
+# else
+# error "Cannot detect Riscv's bitwidth"
+# endif
+
+# define sext32(x) ((sreg_t)(int32_t)(x))
+# define zext32(x) ((reg_t)(uint32_t)(x))
+
+// sext_xlen/zext_xlen: sign- or zero-extend the low `xlen` bits of x to the
+// full register width (via shift up then arithmetic/logical shift down).
+# ifdef JS_CODEGEN_RISCV64
+# define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen))
+# define zext_xlen(x) (((reg_t)(x) << (64 - xlen)) >> (64 - xlen))
+# elif JS_CODEGEN_RISCV32
+# define sext_xlen(x) (((sreg_t)(x) << (32 - xlen)) >> (32 - xlen))
+# define zext_xlen(x) (((reg_t)(x) << (32 - xlen)) >> (32 - xlen))
+# endif
+
+// QUIET_BIT_S/QUIET_BIT_D extract the quiet-NaN bit (bit 22 of a binary32,
+// bit 51 of a binary64). isSnan is only meaningful when `fp` is a NaN: it
+// reports that the quiet bit is clear (i.e. a signaling NaN).
+# define BIT(n) (0x1LL << n)
+# define QUIET_BIT_S(nan) (bit_cast<int32_t>(nan) & BIT(22))
+# define QUIET_BIT_D(nan) (bit_cast<int64_t>(nan) & BIT(51))
+static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); }
+static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
+# undef QUIET_BIT_S
+# undef QUIET_BIT_D
+
+// High-half multiply helpers for MULH/MULHU/MULHSU: return the upper xlen
+// bits of the 2*xlen-bit product.
+# ifdef JS_CODEGEN_RISCV64
+inline uint64_t mulhu(uint64_t a, uint64_t b) {
+  __uint128_t full_result = ((__uint128_t)a) * ((__uint128_t)b);
+  return full_result >> 64;
+}
+
+inline int64_t mulh(int64_t a, int64_t b) {
+  __int128_t full_result = ((__int128_t)a) * ((__int128_t)b);
+  return full_result >> 64;
+}
+
+// Signed * unsigned variant (MULHSU).
+inline int64_t mulhsu(int64_t a, uint64_t b) {
+  __int128_t full_result = ((__int128_t)a) * ((__uint128_t)b);
+  return full_result >> 64;
+}
+# elif JS_CODEGEN_RISCV32
+inline uint32_t mulhu(uint32_t a, uint32_t b) {
+  uint64_t full_result = ((uint64_t)a) * ((uint64_t)b);
+  uint64_t upper_part = full_result >> 32;
+  return (uint32_t)upper_part;
+}
+
+inline int32_t mulh(int32_t a, int32_t b) {
+  int64_t full_result = ((int64_t)a) * ((int64_t)b);
+  int64_t upper_part = full_result >> 32;
+  return (int32_t)upper_part;
+}
+
+inline int32_t mulhsu(int32_t a, uint32_t b) {
+  int64_t full_result = ((int64_t)a) * ((uint64_t)b);
+  int64_t upper_part = full_result >> 32;
+  return (int32_t)upper_part;
+}
+# endif
+
+// Floating point helpers
+// Sign-injection (FSGNJ/FSGNJN/FSGNJX) on raw float values:
+//   x -> XOR rs1's sign into rs2's sign; n -> use rs2's negated sign;
+//   otherwise copy rs2's sign. The magnitude always comes from rs1.
+# define F32_SIGN ((uint32_t)1 << 31)
+union u32_f32 {
+  uint32_t u;
+  float f;
+};
+inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
+  u32_f32 a = {.f = rs1}, b = {.f = rs2};
+  u32_f32 res;
+  res.u = (a.u & ~F32_SIGN) | ((((x) ? a.u
+                                 : (n) ? F32_SIGN
+                                       : 0) ^
+                                b.u) &
+                               F32_SIGN);
+  return res.f;
+}
+
+// Bit-exact sign-injection overload for Float32 (preserves NaN payloads by
+// operating directly on the stored bit patterns).
+inline Float32 fsgnj32(Float32 rs1, Float32 rs2, bool n, bool x) {
+  u32_f32 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
+  u32_f32 res;
+  if (x) {  // RO_FSQNJX_S
+    res.u = (a.u & ~F32_SIGN) | ((a.u ^ b.u) & F32_SIGN);
+  } else {
+    if (n) {  // RO_FSGNJN_S
+      res.u = (a.u & ~F32_SIGN) | ((F32_SIGN ^ b.u) & F32_SIGN);
+    } else {  // RO_FSGNJ_S
+      res.u = (a.u & ~F32_SIGN) | ((0 ^ b.u) & F32_SIGN);
+    }
+  }
+  return Float32::FromBits(res.u);
+}
+// 64-bit sign-injection; same scheme as fsgnj32 above.
+# define F64_SIGN ((uint64_t)1 << 63)
+union u64_f64 {
+  uint64_t u;
+  double d;
+};
+inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
+  u64_f64 a = {.d = rs1}, b = {.d = rs2};
+  u64_f64 res;
+  res.u = (a.u & ~F64_SIGN) | ((((x) ? a.u
+                                 : (n) ? F64_SIGN
+                                       : 0) ^
+                                b.u) &
+                               F64_SIGN);
+  return res.d;
+}
+
+// Sign-injection overload for Float64. NOTE(review): this goes through
+// get_scalar() (a double) rather than get_bits(), unlike the Float32
+// overload — confirm signaling-NaN payloads survive the round-trip.
+inline Float64 fsgnj64(Float64 rs1, Float64 rs2, bool n, bool x) {
+  u64_f64 a = {.d = rs1.get_scalar()}, b = {.d = rs2.get_scalar()};
+  u64_f64 res;
+  if (x) {  // RO_FSQNJX_D
+    res.u = (a.u & ~F64_SIGN) | ((a.u ^ b.u) & F64_SIGN);
+  } else {
+    if (n) {  // RO_FSGNJN_D
+      res.u = (a.u & ~F64_SIGN) | ((F64_SIGN ^ b.u) & F64_SIGN);
+    } else {  // RO_FSGNJ_D
+      res.u = (a.u & ~F64_SIGN) | ((0 ^ b.u) & F64_SIGN);
+    }
+  }
+  return Float64::FromBits(res.u);
+}
+// NaN-boxing helpers: a 32-bit float held in a 64-bit FP register is valid
+// ("boxed") when the upper 32 bits are all ones.
+inline bool is_boxed_float(int64_t v) { return (uint32_t)((v >> 32) + 1) == 0; }
+inline int64_t box_float(float v) {
+  return (0xFFFFFFFF00000000 | bit_cast<int32_t>(v));
+}
+
+inline uint64_t box_float(uint32_t v) { return (0xFFFFFFFF00000000 | v); }
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+// Base for SimInstruction: caches the raw 32 operand bits, the Instruction
+// pointer and the decoded type for the instruction currently being executed.
+// Fix: the Instruction* constructor previously had an empty body, leaving
+// every member uninitialized; it now initializes them like the default
+// constructor (SimInstruction fills in the real values via operator=).
+class SimInstructionBase : public InstructionBase {
+ public:
+  Type InstructionType() const { return type_; }
+  inline Instruction* instr() const { return instr_; }
+  inline int32_t operand() const { return operand_; }
+
+ protected:
+  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+  explicit SimInstructionBase(Instruction* instr)
+      : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+
+  int32_t operand_;
+  Instruction* instr_;
+  Type type_;
+
+ private:
+  SimInstructionBase& operator=(const SimInstructionBase&) = delete;
+};
+
+// Assignable view of the current instruction; the InstructionGetters mixin
+// provides the field accessors (rs1, imm12, ...) used by the decoders.
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+  SimInstruction() {}
+
+  explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+  SimInstruction& operator=(Instruction* instr) {
+    // Cache the raw instruction bits locally.
+    operand_ = *reinterpret_cast<const int32_t*>(instr);
+    instr_ = instr;
+    type_ = InstructionBase::InstructionType();
+    // The getters reinterpret `this` as the instruction bits, so operand_
+    // must be the first member (at offset 0).
+    MOZ_ASSERT(reinterpret_cast<void*>(&operand_) == this);
+    return *this;
+  }
+};
+
+// Per thread simulator state.
+// Simulator of the RISC-V ISA, used when SpiderMonkey targets RISC-V but runs
+// on a different host architecture. It owns the complete architectural state
+// (integer/FP registers, pc, FCSR) plus debugging aids (tracing, watchpoints,
+// breakpoints, stops) and the per-thread simulated stack.
+class Simulator {
+  friend class RiscvDebugger;
+
+ public:
+  // Simulator debugging/tracing flags.
+  static bool FLAG_riscv_trap_to_simulator_debugger;
+  static bool FLAG_trace_sim;
+  static bool FLAG_debug_sim;
+  static bool FLAG_riscv_print_watchpoint;
+  // Registers are declared in order.
+  enum Register {
+    no_reg = -1,
+    x0 = 0,
+    x1,
+    x2,
+    x3,
+    x4,
+    x5,
+    x6,
+    x7,
+    x8,
+    x9,
+    x10,
+    x11,
+    x12,
+    x13,
+    x14,
+    x15,
+    x16,
+    x17,
+    x18,
+    x19,
+    x20,
+    x21,
+    x22,
+    x23,
+    x24,
+    x25,
+    x26,
+    x27,
+    x28,
+    x29,
+    x30,
+    x31,
+    pc,
+    kNumSimuRegisters,
+    // ABI-name aliases for x0..x31.
+    zero = x0,
+    ra = x1,
+    sp = x2,
+    gp = x3,
+    tp = x4,
+    t0 = x5,
+    t1 = x6,
+    t2 = x7,
+    fp = x8,
+    s1 = x9,
+    a0 = x10,
+    a1 = x11,
+    a2 = x12,
+    a3 = x13,
+    a4 = x14,
+    a5 = x15,
+    a6 = x16,
+    a7 = x17,
+    s2 = x18,
+    s3 = x19,
+    s4 = x20,
+    s5 = x21,
+    s6 = x22,
+    s7 = x23,
+    s8 = x24,
+    s9 = x25,
+    s10 = x26,
+    s11 = x27,
+    t3 = x28,
+    t4 = x29,
+    t5 = x30,
+    t6 = x31,
+  };
+
+  // Coprocessor registers.
+  enum FPURegister {
+    f0,
+    f1,
+    f2,
+    f3,
+    f4,
+    f5,
+    f6,
+    f7,
+    f8,
+    f9,
+    f10,
+    f11,
+    f12,
+    f13,
+    f14,
+    f15,
+    f16,
+    f17,
+    f18,
+    f19,
+    f20,
+    f21,
+    f22,
+    f23,
+    f24,
+    f25,
+    f26,
+    f27,
+    f28,
+    f29,
+    f30,
+    f31,
+    kNumFPURegisters,
+    // ABI-name aliases for f0..f31.
+    ft0 = f0,
+    ft1 = f1,
+    ft2 = f2,
+    ft3 = f3,
+    ft4 = f4,
+    ft5 = f5,
+    ft6 = f6,
+    ft7 = f7,
+    fs0 = f8,
+    fs1 = f9,
+    fa0 = f10,
+    fa1 = f11,
+    fa2 = f12,
+    fa3 = f13,
+    fa4 = f14,
+    fa5 = f15,
+    fa6 = f16,
+    fa7 = f17,
+    fs2 = f18,
+    fs3 = f19,
+    fs4 = f20,
+    fs5 = f21,
+    fs6 = f22,
+    fs7 = f23,
+    fs8 = f24,
+    fs9 = f25,
+    fs10 = f26,
+    fs11 = f27,
+    ft8 = f28,
+    ft9 = f29,
+    ft10 = f30,
+    ft11 = f31
+  };
+
+  // Returns nullptr on OOM.
+  static Simulator* Create();
+
+  static void Destroy(Simulator* simulator);
+
+  // Constructor/destructor are for internal use only; use the static methods
+  // above.
+  Simulator();
+  ~Simulator();
+
+  // RISC-V decoding routines, one per major instruction format (R, R4, I, S,
+  // B, U, J) plus the compressed ("C") formats and, optionally, vector ("V").
+  void DecodeRVRType();
+  void DecodeRVR4Type();
+  void DecodeRVRFPType();  // Special routine for R/OP_FP type
+  void DecodeRVRAType();   // Special routine for R/AMO type
+  void DecodeRVIType();
+  void DecodeRVSType();
+  void DecodeRVBType();
+  void DecodeRVUType();
+  void DecodeRVJType();
+  void DecodeCRType();
+  void DecodeCAType();
+  void DecodeCIType();
+  void DecodeCIWType();
+  void DecodeCSSType();
+  void DecodeCLType();
+  void DecodeCSType();
+  void DecodeCJType();
+  void DecodeCBType();
+# ifdef CAN_USE_RVV_INSTRUCTIONS
+  void DecodeVType();
+  void DecodeRvvIVV();
+  void DecodeRvvIVI();
+  void DecodeRvvIVX();
+  void DecodeRvvMVV();
+  void DecodeRvvMVX();
+  void DecodeRvvFVV();
+  void DecodeRvvFVF();
+  bool DecodeRvvVL();
+  bool DecodeRvvVS();
+# endif
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* Current();
+
+  static inline uintptr_t StackLimit() {
+    return Simulator::Current()->stackLimit();
+  }
+
+  uintptr_t* addressOfStackLimit();
+
+  // Accessors for register state. Reading the pc value is off by 8 from the
+  // currently executing instruction (comment inherited from the MIPS port —
+  // NOTE(review): confirm the offset still applies to the RISC-V simulator).
+  void setRegister(int reg, int64_t value);
+  int64_t getRegister(int reg) const;
+  // Same for FPURegisters.
+  void setFpuRegister(int fpureg, int64_t value);
+  void setFpuRegisterLo(int fpureg, int32_t value);
+  void setFpuRegisterHi(int fpureg, int32_t value);
+  void setFpuRegisterFloat(int fpureg, float value);
+  void setFpuRegisterDouble(int fpureg, double value);
+  void setFpuRegisterFloat(int fpureg, Float32 value);
+  void setFpuRegisterDouble(int fpureg, Float64 value);
+
+  int64_t getFpuRegister(int fpureg) const;
+  int32_t getFpuRegisterLo(int fpureg) const;
+  int32_t getFpuRegisterHi(int fpureg) const;
+  float getFpuRegisterFloat(int fpureg) const;
+  double getFpuRegisterDouble(int fpureg) const;
+  Float32 getFpuRegisterFloat32(int fpureg) const;
+  Float64 getFpuRegisterFloat64(int fpureg) const;
+
+  // Immediate / shift-amount field accessors for the current instruction.
+  inline int16_t shamt6() const { return (imm12() & 0x3F); }
+  inline int16_t shamt5() const { return (imm12() & 0x1F); }
+  inline int16_t rvc_shamt6() const { return instr_.RvcShamt6(); }
+  inline int32_t s_imm12() const { return instr_.StoreOffset(); }
+  inline int32_t u_imm20() const { return instr_.Imm20UValue() << 12; }
+  inline int32_t rvc_u_imm6() const { return instr_.RvcImm6Value() << 12; }
+  // Raises an illegal-instruction exception unless |check| holds.
+  inline void require(bool check) {
+    if (!check) {
+      SignalException(kIllegalInstruction);
+    }
+  }
+
+  // Special case of setRegister and getRegister to access the raw PC value.
+  void set_pc(int64_t value);
+  int64_t get_pc() const;
+
+  // The instruction currently being decoded/executed; the operand accessors
+  // below (rs1(), rd_reg(), imm12(), ...) all read from it.
+  SimInstruction instr_;
+  // RISC-V utility API to access register values.
+  // Helpers for data value tracing.
+  enum TraceType {
+    BYTE,
+    HALF,
+    WORD,
+# if JS_CODEGEN_RISCV64
+    DWORD,
+# endif
+    FLOAT,
+    DOUBLE,
+    // FLOAT_DOUBLE,
+    // WORD_DWORD
+  };
+  // Operand accessors: *_reg() return the register index encoded in the
+  // current instruction, the unsuffixed forms return its current value
+  // (f-prefixed = float, d-prefixed = double, _boxed = raw bit pattern).
+  inline int32_t rs1_reg() const { return instr_.Rs1Value(); }
+  inline sreg_t rs1() const { return getRegister(rs1_reg()); }
+  inline float frs1() const { return getFpuRegisterFloat(rs1_reg()); }
+  inline double drs1() const { return getFpuRegisterDouble(rs1_reg()); }
+  inline Float32 frs1_boxed() const { return getFpuRegisterFloat32(rs1_reg()); }
+  inline Float64 drs1_boxed() const { return getFpuRegisterFloat64(rs1_reg()); }
+  inline int32_t rs2_reg() const { return instr_.Rs2Value(); }
+  inline sreg_t rs2() const { return getRegister(rs2_reg()); }
+  inline float frs2() const { return getFpuRegisterFloat(rs2_reg()); }
+  inline double drs2() const { return getFpuRegisterDouble(rs2_reg()); }
+  inline Float32 frs2_boxed() const { return getFpuRegisterFloat32(rs2_reg()); }
+  inline Float64 drs2_boxed() const { return getFpuRegisterFloat64(rs2_reg()); }
+  inline int32_t rs3_reg() const { return instr_.Rs3Value(); }
+  inline sreg_t rs3() const { return getRegister(rs3_reg()); }
+  inline float frs3() const { return getFpuRegisterFloat(rs3_reg()); }
+  inline double drs3() const { return getFpuRegisterDouble(rs3_reg()); }
+  inline Float32 frs3_boxed() const { return getFpuRegisterFloat32(rs3_reg()); }
+  inline Float64 drs3_boxed() const { return getFpuRegisterFloat64(rs3_reg()); }
+  inline int32_t rd_reg() const { return instr_.RdValue(); }
+  inline int32_t frd_reg() const { return instr_.RdValue(); }
+  inline int32_t rvc_rs1_reg() const { return instr_.RvcRs1Value(); }
+  inline sreg_t rvc_rs1() const { return getRegister(rvc_rs1_reg()); }
+  inline int32_t rvc_rs2_reg() const { return instr_.RvcRs2Value(); }
+  inline sreg_t rvc_rs2() const { return getRegister(rvc_rs2_reg()); }
+  inline double rvc_drs2() const { return getFpuRegisterDouble(rvc_rs2_reg()); }
+  inline int32_t rvc_rs1s_reg() const { return instr_.RvcRs1sValue(); }
+  inline sreg_t rvc_rs1s() const { return getRegister(rvc_rs1s_reg()); }
+  inline int32_t rvc_rs2s_reg() const { return instr_.RvcRs2sValue(); }
+  inline sreg_t rvc_rs2s() const { return getRegister(rvc_rs2s_reg()); }
+  inline double rvc_drs2s() const {
+    return getFpuRegisterDouble(rvc_rs2s_reg());
+  }
+  inline int32_t rvc_rd_reg() const { return instr_.RvcRdValue(); }
+  inline int32_t rvc_frd_reg() const { return instr_.RvcRdValue(); }
+  inline int16_t boffset() const { return instr_.BranchOffset(); }
+  inline int16_t imm12() const { return instr_.Imm12Value(); }
+  inline int32_t imm20J() const { return instr_.Imm20JValue(); }
+  inline int32_t imm5CSR() const { return instr_.Rs1Value(); }
+  inline int16_t csr_reg() const { return instr_.CsrValue(); }
+  inline int16_t rvc_imm6() const { return instr_.RvcImm6Value(); }
+  inline int16_t rvc_imm6_addi16sp() const {
+    return instr_.RvcImm6Addi16spValue();
+  }
+  inline int16_t rvc_imm8_addi4spn() const {
+    return instr_.RvcImm8Addi4spnValue();
+  }
+  inline int16_t rvc_imm6_lwsp() const { return instr_.RvcImm6LwspValue(); }
+  inline int16_t rvc_imm6_ldsp() const { return instr_.RvcImm6LdspValue(); }
+  inline int16_t rvc_imm6_swsp() const { return instr_.RvcImm6SwspValue(); }
+  inline int16_t rvc_imm6_sdsp() const { return instr_.RvcImm6SdspValue(); }
+  inline int16_t rvc_imm5_w() const { return instr_.RvcImm5WValue(); }
+  inline int16_t rvc_imm5_d() const { return instr_.RvcImm5DValue(); }
+  inline int16_t rvc_imm8_b() const { return instr_.RvcImm8BValue(); }
+
+  // Helper for debugging memory access.
+  inline void DieOrDebug();
+
+  // Tracing helpers for register writes, memory reads/writes and LL/SC.
+# if JS_CODEGEN_RISCV32
+  template <typename T>
+  void TraceRegWr(T value, TraceType t = WORD);
+# elif JS_CODEGEN_RISCV64
+  void TraceRegWr(sreg_t value, TraceType t = DWORD);
+# endif
+  void TraceMemWr(sreg_t addr, sreg_t value, TraceType t);
+  template <typename T>
+  void TraceMemRd(sreg_t addr, T value, sreg_t reg_value);
+  void TraceMemRdDouble(sreg_t addr, double value, int64_t reg_value);
+  void TraceMemRdDouble(sreg_t addr, Float64 value, int64_t reg_value);
+  void TraceMemRdFloat(sreg_t addr, Float32 value, int64_t reg_value);
+
+  template <typename T>
+  void TraceLr(sreg_t addr, T value, sreg_t reg_value);
+
+  template <typename T>
+  void TraceSc(sreg_t addr, T value);
+
+  template <typename T>
+  void TraceMemWr(sreg_t addr, T value);
+  void TraceMemWrDouble(sreg_t addr, double value);
+
+  // Destination-register writers: store |value| into the register named by
+  // the current instruction and (optionally) trace the write.
+  inline void set_rd(sreg_t value, bool trace = true) {
+    setRegister(rd_reg(), value);
+# if JS_CODEGEN_RISCV64
+    if (trace) TraceRegWr(getRegister(rd_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+    if (trace) TraceRegWr(getRegister(rd_reg()), WORD);
+# endif
+  }
+  inline void set_frd(float value, bool trace = true) {
+    setFpuRegisterFloat(rd_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rd_reg()), FLOAT);
+  }
+  inline void set_frd(Float32 value, bool trace = true) {
+    setFpuRegisterFloat(rd_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rd_reg()), FLOAT);
+  }
+  inline void set_drd(double value, bool trace = true) {
+    setFpuRegisterDouble(rd_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rd_reg()), DOUBLE);
+  }
+  inline void set_drd(Float64 value, bool trace = true) {
+    setFpuRegisterDouble(rd_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rd_reg()), DOUBLE);
+  }
+  inline void set_rvc_rd(sreg_t value, bool trace = true) {
+    setRegister(rvc_rd_reg(), value);
+# if JS_CODEGEN_RISCV64
+    if (trace) TraceRegWr(getRegister(rvc_rd_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+    if (trace) TraceRegWr(getRegister(rvc_rd_reg()), WORD);
+# endif
+  }
+  inline void set_rvc_rs1s(sreg_t value, bool trace = true) {
+    setRegister(rvc_rs1s_reg(), value);
+# if JS_CODEGEN_RISCV64
+    if (trace) TraceRegWr(getRegister(rvc_rs1s_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+    if (trace) TraceRegWr(getRegister(rvc_rs1s_reg()), WORD);
+# endif
+  }
+  inline void set_rvc_rs2(sreg_t value, bool trace = true) {
+    setRegister(rvc_rs2_reg(), value);
+# if JS_CODEGEN_RISCV64
+    if (trace) TraceRegWr(getRegister(rvc_rs2_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+    if (trace) TraceRegWr(getRegister(rvc_rs2_reg()), WORD);
+# endif
+  }
+  inline void set_rvc_drd(double value, bool trace = true) {
+    setFpuRegisterDouble(rvc_rd_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rvc_rd_reg()), DOUBLE);
+  }
+  inline void set_rvc_drd(Float64 value, bool trace = true) {
+    setFpuRegisterDouble(rvc_rd_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rvc_rd_reg()), DOUBLE);
+  }
+  inline void set_rvc_frd(Float32 value, bool trace = true) {
+    setFpuRegisterFloat(rvc_rd_reg(), value);
+    // NOTE(review): the value written is a Float32 but the write is traced as
+    // DOUBLE; FLOAT looks intended (compare set_frd / set_rvc_frs2s) —
+    // confirm against TraceRegWr before changing.
+    if (trace) TraceRegWr(getFpuRegister(rvc_rd_reg()), DOUBLE);
+  }
+  inline void set_rvc_rs2s(sreg_t value, bool trace = true) {
+    setRegister(rvc_rs2s_reg(), value);
+# if JS_CODEGEN_RISCV64
+    if (trace) TraceRegWr(getRegister(rvc_rs2s_reg()), DWORD);
+# elif JS_CODEGEN_RISCV32
+    if (trace) TraceRegWr(getRegister(rvc_rs2s_reg()), WORD);
+# endif
+  }
+  inline void set_rvc_drs2s(double value, bool trace = true) {
+    setFpuRegisterDouble(rvc_rs2s_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rvc_rs2s_reg()), DOUBLE);
+  }
+  inline void set_rvc_drs2s(Float64 value, bool trace = true) {
+    setFpuRegisterDouble(rvc_rs2s_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rvc_rs2s_reg()), DOUBLE);
+  }
+
+  inline void set_rvc_frs2s(Float32 value, bool trace = true) {
+    setFpuRegisterFloat(rvc_rs2s_reg(), value);
+    if (trace) TraceRegWr(getFpuRegister(rvc_rs2s_reg()), FLOAT);
+  }
+
+  // Returns the dynamic rounding mode currently held in the frm CSR field.
+  uint32_t get_dynamic_rounding_mode() { return read_csr_value(csr_frm); }
+
+  // Helper functions to read/write/set/clear CSR (fflags/frm/fcsr) values and
+  // bits. All three CSRs are views of the single FCSR_ backing word.
+  uint32_t read_csr_value(uint32_t csr) {
+    switch (csr) {
+      case csr_fflags:  // Floating-Point Accrued Exceptions (RW)
+        return (FCSR_ & kFcsrFlagsMask);
+      case csr_frm:  // Floating-Point Dynamic Rounding Mode (RW)
+        return (FCSR_ & kFcsrFrmMask) >> kFcsrFrmShift;
+      case csr_fcsr:  // Floating-Point Control and Status Register (RW)
+        return (FCSR_ & kFcsrMask);
+      default:
+        MOZ_CRASH("UNIMPLEMENTED");
+    }
+  }
+
+  void write_csr_value(uint32_t csr, reg_t val) {
+    uint32_t value = (uint32_t)val;
+    switch (csr) {
+      case csr_fflags:  // Floating-Point Accrued Exceptions (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrFlagsBits) - 1));
+        FCSR_ = (FCSR_ & (~kFcsrFlagsMask)) | value;
+        break;
+      case csr_frm:  // Floating-Point Dynamic Rounding Mode (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrFrmBits) - 1));
+        FCSR_ = (FCSR_ & (~kFcsrFrmMask)) | (value << kFcsrFrmShift);
+        break;
+      case csr_fcsr:  // Floating-Point Control and Status Register (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrBits) - 1));
+        FCSR_ = (FCSR_ & (~kFcsrMask)) | value;
+        break;
+      default:
+        MOZ_CRASH("UNIMPLEMENTED");
+    }
+  }
+
+  void set_csr_bits(uint32_t csr, reg_t val) {
+    uint32_t value = (uint32_t)val;
+    switch (csr) {
+      case csr_fflags:  // Floating-Point Accrued Exceptions (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrFlagsBits) - 1));
+        FCSR_ = FCSR_ | value;
+        break;
+      case csr_frm:  // Floating-Point Dynamic Rounding Mode (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrFrmBits) - 1));
+        FCSR_ = FCSR_ | (value << kFcsrFrmShift);
+        break;
+      case csr_fcsr:  // Floating-Point Control and Status Register (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrBits) - 1));
+        FCSR_ = FCSR_ | value;
+        break;
+      default:
+        MOZ_CRASH("UNIMPLEMENTED");
+    }
+  }
+
+  void clear_csr_bits(uint32_t csr, reg_t val) {
+    uint32_t value = (uint32_t)val;
+    switch (csr) {
+      case csr_fflags:  // Floating-Point Accrued Exceptions (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrFlagsBits) - 1));
+        FCSR_ = FCSR_ & (~value);
+        break;
+      case csr_frm:  // Floating-Point Dynamic Rounding Mode (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrFrmBits) - 1));
+        FCSR_ = FCSR_ & (~(value << kFcsrFrmShift));
+        break;
+      case csr_fcsr:  // Floating-Point Control and Status Register (RW)
+        MOZ_ASSERT(value <= ((1 << kFcsrBits) - 1));
+        FCSR_ = FCSR_ & (~value);
+        break;
+      default:
+        MOZ_CRASH("UNIMPLEMENTED");
+    }
+  }
+
+  // True iff any of the fflags bits selected by |mask| are set.
+  bool test_fflags_bits(uint32_t mask) {
+    return (FCSR_ & kFcsrFlagsMask & mask) != 0;
+  }
+
+  void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
+  void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
+
+  // FP rounding/conversion helpers; |rmode| is a RISC-V rounding mode.
+  float RoundF2FHelper(float input_val, int rmode);
+  double RoundF2FHelper(double input_val, int rmode);
+  template <typename I_TYPE, typename F_TYPE>
+  I_TYPE RoundF2IHelper(F_TYPE original, int rmode);
+
+  template <typename T>
+  T FMaxMinHelper(T a, T b, MaxMinKind kind);
+
+  template <typename T>
+  bool CompareFHelper(T input1, T input2, FPUCondition cc);
+
+  template <typename T>
+  T get_pc_as() const {
+    return reinterpret_cast<T>(get_pc());
+  }
+
+  void enable_single_stepping(SingleStepCallback cb, void* arg);
+  void disable_single_stepping();
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t stackLimit() const;
+  bool overRecursed(uintptr_t newsp = 0) const;
+  bool overRecursedWithExtra(uint32_t extra) const;
+
+  // Executes RISC-V instructions until the PC reaches end_sim_pc.
+  template <bool enableStopSimAt>
+  void execute();
+
+  // Sets up the simulator state and grabs the result on return.
+  int64_t call(uint8_t* entry, int argument_count, ...);
+
+  // Push an address onto the JS stack.
+  uintptr_t pushAddress(uintptr_t address);
+
+  // Pop an address from the JS stack.
+  uintptr_t popAddress();
+
+  // Debugger input.
+  void setLastDebuggerInput(char* input);
+  char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+  // Returns true if pc register contains one of the 'SpecialValues' defined
+  // below (bad_ra, end_sim_pc).
+  bool has_bad_pc() const;
+
+ private:
+  enum SpecialValues {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly setup.
+    bad_ra = -1,
+    // A pc value used to signal the simulator to stop execution. Generally
+    // the ra is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2,
+    // Unpredictable value.
+    Unpredictable = 0xbadbeaf
+  };
+
+  bool init();
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void format(SimInstruction* instr, const char* format);
+
+  // Read and write memory.
+  // RISCV Memory read/write methods
+  template <typename T>
+  T ReadMem(sreg_t addr, Instruction* instr);
+  template <typename T>
+  void WriteMem(sreg_t addr, T value, Instruction* instr);
+  // Atomic memory operation: read-modify-write of |addr| through |f|,
+  // returning the old value (per the RISC-V "A" extension semantics).
+  template <typename T, typename OP>
+  T amo(sreg_t addr, OP f, Instruction* instr, TraceType t) {
+    auto lhs = ReadMem<T>(addr, instr);
+    // TODO(RISCV): trace memory read for AMO
+    WriteMem<T>(addr, (T)f(lhs), instr);
+    return lhs;
+  }
+
+  // Load-linked / store-conditional support (lr.w/sc.w, lr.d/sc.d).
+  inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
+  inline int storeConditionalW(uint64_t addr, int32_t value,
+                               SimInstruction* instr);
+
+  inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
+  inline int storeConditionalD(uint64_t addr, int64_t value,
+                               SimInstruction* instr);
+
+  // Used for breakpoints and traps.
+  void SoftwareInterrupt();
+
+  // Stop helper functions.
+  bool isWatchpoint(uint32_t code);
+  bool IsTracepoint(uint32_t code);
+  void printWatchpoint(uint32_t code);
+  void handleStop(uint32_t code);
+  bool isStopInstruction(SimInstruction* instr);
+  bool isEnabledStop(uint32_t code);
+  void enableStop(uint32_t code);
+  void disableStop(uint32_t code);
+  void increaseStopCounter(uint32_t code);
+  void printStopInfo(uint32_t code);
+
+  // Simulator breakpoints.
+  struct Breakpoint {
+    SimInstruction* location;
+    bool enabled;
+    bool is_tbreak;
+  };
+  std::vector<Breakpoint> breakpoints_;
+  void SetBreakpoint(SimInstruction* breakpoint, bool is_tbreak);
+  void ListBreakpoints();
+  void CheckBreakpoints();
+
+  JS::ProfilingFrameIterator::RegisterState registerState();
+
+  // Handle any wasm faults, returning true if the fault was handled.
+  // This method is rather hot so inline the normal (no-wasm) case.
+  bool MOZ_ALWAYS_INLINE handleWasmSegFault(uint64_t addr, unsigned numBytes) {
+    if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+      return false;
+    }
+
+    uint8_t* newPC;
+    if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+                                     &newPC)) {
+      return false;
+    }
+
+    // The faulting access also invalidates any pending LL/SC reservation.
+    LLBit_ = false;
+    set_pc(int64_t(newPC));
+    return true;
+  }
+
+  // Executes one instruction.
+  void InstructionDecode(Instruction* instr);
+
+  // ICache.
+  // static void CheckICache(base::CustomMatcherHashMap* i_cache,
+  //                         Instruction* instr);
+  // static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t
+  // start,
+  //                          size_t size);
+  // static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+  //                                void* page);
+  // NaN-canonicalization wrappers: run |fn|, and if any input or the result
+  // is NaN, return the canonical quiet NaN (raising kInvalidOperation when a
+  // signaling NaN was involved), as required by the RISC-V F/D extensions.
+  template <typename T, typename Func>
+  inline T CanonicalizeFPUOpFMA(Func fn, T dst, T src1, T src2) {
+    static_assert(std::is_floating_point<T>::value);
+    auto alu_out = fn(dst, src1, src2);
+    // if any input or result is NaN, the result is quiet_NaN
+    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
+        std::isnan(dst)) {
+      // signaling_nan sets kInvalidOperation bit
+      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(dst))
+        set_fflags(kInvalidOperation);
+      alu_out = std::numeric_limits<T>::quiet_NaN();
+    }
+    return alu_out;
+  }
+
+  template <typename T, typename Func>
+  inline T CanonicalizeFPUOp3(Func fn) {
+    static_assert(std::is_floating_point<T>::value);
+    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+    T src2 = std::is_same<float, T>::value ? frs2() : drs2();
+    T src3 = std::is_same<float, T>::value ? frs3() : drs3();
+    auto alu_out = fn(src1, src2, src3);
+    // if any input or result is NaN, the result is quiet_NaN
+    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
+        std::isnan(src3)) {
+      // signaling_nan sets kInvalidOperation bit
+      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(src3))
+        set_fflags(kInvalidOperation);
+      alu_out = std::numeric_limits<T>::quiet_NaN();
+    }
+    return alu_out;
+  }
+
+  template <typename T, typename Func>
+  inline T CanonicalizeFPUOp2(Func fn) {
+    static_assert(std::is_floating_point<T>::value);
+    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+    T src2 = std::is_same<float, T>::value ? frs2() : drs2();
+    auto alu_out = fn(src1, src2);
+    // if any input or result is NaN, the result is quiet_NaN
+    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2)) {
+      // signaling_nan sets kInvalidOperation bit
+      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2))
+        set_fflags(kInvalidOperation);
+      alu_out = std::numeric_limits<T>::quiet_NaN();
+    }
+    return alu_out;
+  }
+
+  template <typename T, typename Func>
+  inline T CanonicalizeFPUOp1(Func fn) {
+    static_assert(std::is_floating_point<T>::value);
+    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+    auto alu_out = fn(src1);
+    // if any input or result is NaN, the result is quiet_NaN
+    if (std::isnan(alu_out) || std::isnan(src1)) {
+      // signaling_nan sets kInvalidOperation bit
+      if (isSnan(alu_out) || isSnan(src1)) set_fflags(kInvalidOperation);
+      alu_out = std::numeric_limits<T>::quiet_NaN();
+    }
+    return alu_out;
+  }
+
+  template <typename Func>
+  inline float CanonicalizeDoubleToFloatOperation(Func fn) {
+    float alu_out = fn(drs1());
+    if (std::isnan(alu_out) || std::isnan(drs1()))
+      alu_out = std::numeric_limits<float>::quiet_NaN();
+    return alu_out;
+  }
+
+  template <typename Func>
+  inline float CanonicalizeDoubleToFloatOperation(Func fn, double frs) {
+    float alu_out = fn(frs);
+    // NOTE(review): checks drs1() rather than the explicit |frs| argument;
+    // std::isnan(frs) looks intended — confirm against the call sites.
+    if (std::isnan(alu_out) || std::isnan(drs1()))
+      alu_out = std::numeric_limits<float>::quiet_NaN();
+    return alu_out;
+  }
+
+  template <typename Func>
+  inline float CanonicalizeFloatToDoubleOperation(Func fn, float frs) {
+    // NOTE(review): |alu_out| is a double but the return type is float, so
+    // the widened result is narrowed back on return; a double return type
+    // looks intended. Also checks frs1() rather than |frs|. Confirm against
+    // the call sites before changing.
+    double alu_out = fn(frs);
+    if (std::isnan(alu_out) || std::isnan(frs1()))
+      alu_out = std::numeric_limits<double>::quiet_NaN();
+    return alu_out;
+  }
+
+  template <typename Func>
+  inline float CanonicalizeFloatToDoubleOperation(Func fn) {
+    // NOTE(review): same float-return narrowing concern as the overload
+    // above.
+    double alu_out = fn(frs1());
+    if (std::isnan(alu_out) || std::isnan(frs1()))
+      alu_out = std::numeric_limits<double>::quiet_NaN();
+    return alu_out;
+  }
+
+ public:
+  static int64_t StopSimAt;
+
+  // Runtime call support.
+  static void* RedirectNativeFunction(void* nativeFunction,
+                                      ABIFunctionType type);
+
+ private:
+  enum Exception {
+    none,
+    kIntegerOverflow,
+    kIntegerUnderflow,
+    kDivideByZero,
+    kNumExceptions,
+    // RISC-V illegal instruction exception.
+    // NOTE(review): declared after kNumExceptions, so it deliberately(?) has
+    // no slot in the exceptions[] counter array below — confirm.
+    kIllegalInstruction,
+  };
+  int16_t exceptions[kNumExceptions];
+
+  // Exceptions.
+  void SignalException(Exception e);
+
+  // Handle return value for runtime FP functions.
+  void setCallResultDouble(double result);
+  void setCallResultFloat(float result);
+  void setCallResult(int64_t res);
+  void setCallResult(__int128 res);
+
+  void callInternal(uint8_t* entry);
+
+  // Architecture state.
+  // Registers.
+  int64_t registers_[kNumSimuRegisters];
+  // Coprocessor Registers.
+  int64_t FPUregisters_[kNumFPURegisters];
+  // FPU control register.
+  uint32_t FCSR_;
+
+  // Load-linked / store-conditional reservation state.
+  bool LLBit_;
+  uintptr_t LLAddr_;
+  int64_t lastLLValue_;
+
+  // Simulator support.
+  char* stack_;
+  uintptr_t stackLimit_;
+  bool pc_modified_;
+  int64_t icount_;
+  int64_t break_count_;
+
+  // Debugger input.
+  char* lastDebuggerInput_;
+
+  intptr_t* watch_address_ = nullptr;
+  intptr_t watch_value_ = 0;
+
+  // Registered breakpoints.
+  SimInstruction* break_pc_;
+  Instr break_instr_;
+  EmbeddedVector<char, 256> trace_buf_;
+
+  // Single-stepping support
+  bool single_stepping_;
+  SingleStepCallback single_step_callback_;
+  void* single_step_callback_arg_;
+
+  // A stop is watched if its code is less than kNumOfWatchedStops.
+  // Only watched stops support enabling/disabling and the counter feature.
+  static const uint32_t kNumOfWatchedStops = 256;
+
+  // Stop is disabled if bit 31 is set.
+  static const uint32_t kStopDisabledBit = 1U << 31;
+
+  // A stop is enabled, meaning the simulator will stop when meeting the
+  // instruction, if bit 31 of watchedStops_[code].count is unset.
+  // The value watchedStops_[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint was hit or gone through.
+  struct StopCountAndDesc {
+    uint32_t count_;
+    char* desc_;
+  };
+  StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+};
+
+// Process wide simulator state. A single instance (singleton_) holds the
+// redirection list for native calls and the simulated-ICache map, both
+// guarded by cacheLock_.
+class SimulatorProcess {
+  friend class Redirection;
+  friend class AutoLockSimulatorCache;
+
+ private:
+  // ICache checking: hash/match policy for the void* keyed ICache map.
+  struct ICacheHasher {
+    typedef void* Key;
+    typedef void* Lookup;
+    static HashNumber hash(const Lookup& l);
+    static bool match(const Key& k, const Lookup& l);
+  };
+
+ public:
+  typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+  // When non-zero, ICache consistency checking is disabled.
+  static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+      ICacheCheckingDisableCount;
+  static void FlushICache(void* start, size_t size);
+
+  static void checkICacheLocked(SimInstruction* instr);
+
+  // Allocates the process-wide singleton; returns false on OOM.
+  static bool initialize() {
+    singleton_ = js_new<SimulatorProcess>();
+    return singleton_;
+  }
+  static void destroy() {
+    js_delete(singleton_);
+    singleton_ = nullptr;
+  }
+
+  SimulatorProcess();
+  ~SimulatorProcess();
+
+ private:
+  static SimulatorProcess* singleton_;
+
+  // This lock creates a critical section around 'redirection_' and
+  // 'icache_', which are referenced both by the execution engine
+  // and by the off-thread compiler (see Redirection::Get in the cpp file).
+  Mutex cacheLock_ MOZ_UNANNOTATED;
+
+  Redirection* redirection_;
+  ICacheMap icache_;
+
+ public:
+  // The accessors below assert that cacheLock_ is already held by the caller.
+  static ICacheMap& icache() {
+    // Technically we need the lock to access the innards of the
+    // icache, not to take its address, but the latter condition
+    // serves as a useful complement to the former.
+    singleton_->cacheLock_.assertOwnedByCurrentThread();
+    return singleton_->icache_;
+  }
+
+  static Redirection* redirection() {
+    singleton_->cacheLock_.assertOwnedByCurrentThread();
+    return singleton_->redirection_;
+  }
+
+  static void setRedirection(js::jit::Redirection* redirection) {
+    singleton_->cacheLock_.assertOwnedByCurrentThread();
+    singleton_->redirection_ = redirection;
+  }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_RISCV64 */
+
+#endif /* jit_riscv64_Simulator_riscv64_h */
diff --git a/js/src/jit/riscv64/Trampoline-riscv64.cpp b/js/src/jit/riscv64/Trampoline-riscv64.cpp
new file mode 100644
index 0000000000..6a8782ddfd
--- /dev/null
+++ b/js/src/jit/riscv64/Trampoline-riscv64.cpp
@@ -0,0 +1,856 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/riscv64/SharedICRegisters-riscv64.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// This file implements generation of the JIT trampolines (EnterJIT,
+// invalidator, arguments rectifier, bailout thunks, VM wrappers) for the
+// riscv64 backend, and also includes implementations for assorted random
+// things which can't be implemented in headers.
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+// Used by the bailout (PushBailoutFrame) and invalidation trampolines below.
+static const LiveRegisterSet AllRegs =
+    LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+                    FloatRegisterSet(FloatRegisters::AllMask));
+
+// Build a BailoutStack on the stack: the frame size (arriving in ra), then
+// every machine register; leaves the BailoutStack pointer in spArg, ready to
+// be passed to the C++ Bailout() function.
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+  // Push the frameSize_ stored in ra.
+  // See: generateOutOfLineCode() in the code generator. (The original
+  // comment referenced the LOONG64 port this code was derived from.)
+  masm.push(ra);
+
+  // Push registers such that we can access them from [base + code].
+  masm.PushRegsInMask(AllRegs);
+
+  // Put pointer to BailoutStack as first argument to the Bailout()
+  masm.movePtr(StackPointer, spArg);
+}
+
+// Layout of the register-save area reserved by GeneratePrologue and released
+// by GenerateReturn. Fields are addressed via offsetof(), so this layout must
+// stay in sync with the sd/fsd (and ld/fld) sequences in those two functions.
+struct EnterJITRegs {
+  // Callee-saved floating point registers (fs0-fs11).
+  double fs11;
+  double fs10;
+  double fs9;
+  double fs8;
+  double fs7;
+  double fs6;
+  double fs5;
+  double fs4;
+  double fs3;
+  double fs2;
+  double fs1;
+  double fs0;
+
+  // uintptr_t align;
+
+  // non-volatile registers.
+  uint64_t ra;
+  uint64_t sp;
+  uint64_t fp;
+  uint64_t gp;
+  uint64_t s11;
+  uint64_t s10;
+  uint64_t s9;
+  uint64_t s8;
+  uint64_t s7;
+  uint64_t s6;
+  uint64_t s5;
+  uint64_t s4;
+  uint64_t s3;
+  uint64_t s2;
+  uint64_t s1;
+  // Save reg_vp(a7) on stack, use it after call jit code.
+  uint64_t a7;
+};
+
+// Epilogue shared by the EnterJIT exit paths: reloads every callee-saved
+// GPR and FPR from the EnterJITRegs area, frees that area, and jumps through
+// ra back to the C++ caller.
+// NOTE(review): the returnCode parameter is unused in this implementation.
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+  MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+  // Restore non-volatile registers
+  masm.ld(s1, StackPointer, offsetof(EnterJITRegs, s1));
+  masm.ld(s2, StackPointer, offsetof(EnterJITRegs, s2));
+  masm.ld(s3, StackPointer, offsetof(EnterJITRegs, s3));
+  masm.ld(s4, StackPointer, offsetof(EnterJITRegs, s4));
+  masm.ld(s5, StackPointer, offsetof(EnterJITRegs, s5));
+  masm.ld(s6, StackPointer, offsetof(EnterJITRegs, s6));
+  masm.ld(s7, StackPointer, offsetof(EnterJITRegs, s7));
+  masm.ld(s8, StackPointer, offsetof(EnterJITRegs, s8));
+  masm.ld(s9, StackPointer, offsetof(EnterJITRegs, s9));
+  masm.ld(s10, StackPointer, offsetof(EnterJITRegs, s10));
+  masm.ld(s11, StackPointer, offsetof(EnterJITRegs, s11));
+  masm.ld(gp, StackPointer, offsetof(EnterJITRegs, gp));
+  masm.ld(fp, StackPointer, offsetof(EnterJITRegs, fp));
+  masm.ld(sp, StackPointer, offsetof(EnterJITRegs, sp));
+  masm.ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+  // Restore non-volatile floating point registers
+  masm.fld(fs11, StackPointer, offsetof(EnterJITRegs, fs11));
+  masm.fld(fs10, StackPointer, offsetof(EnterJITRegs, fs10));
+  masm.fld(fs9, StackPointer, offsetof(EnterJITRegs, fs9));
+  masm.fld(fs8, StackPointer, offsetof(EnterJITRegs, fs8));
+  masm.fld(fs7, StackPointer, offsetof(EnterJITRegs, fs7));
+  masm.fld(fs6, StackPointer, offsetof(EnterJITRegs, fs6));
+  masm.fld(fs5, StackPointer, offsetof(EnterJITRegs, fs5));
+  masm.fld(fs4, StackPointer, offsetof(EnterJITRegs, fs4));
+  masm.fld(fs3, StackPointer, offsetof(EnterJITRegs, fs3));
+  masm.fld(fs2, StackPointer, offsetof(EnterJITRegs, fs2));
+  masm.fld(fs1, StackPointer, offsetof(EnterJITRegs, fs1));
+  masm.fld(fs0, StackPointer, offsetof(EnterJITRegs, fs0));
+
+  masm.freeStack(sizeof(EnterJITRegs));
+
+  // Return to the C++ caller.
+  masm.branch(ra);
+}
+
+// Prologue for EnterJIT: reserves an EnterJITRegs area on the stack and
+// spills all callee-saved GPRs and FPRs into it, plus a7 (reg_vp), which is
+// reloaded after the call into JIT code to store the return value.
+static void GeneratePrologue(MacroAssembler& masm) {
+  masm.reserveStack(sizeof(EnterJITRegs));
+
+  masm.sd(s1, StackPointer, offsetof(EnterJITRegs, s1));
+  masm.sd(s2, StackPointer, offsetof(EnterJITRegs, s2));
+  masm.sd(s3, StackPointer, offsetof(EnterJITRegs, s3));
+  masm.sd(s4, StackPointer, offsetof(EnterJITRegs, s4));
+  masm.sd(s5, StackPointer, offsetof(EnterJITRegs, s5));
+  masm.sd(s6, StackPointer, offsetof(EnterJITRegs, s6));
+  masm.sd(s7, StackPointer, offsetof(EnterJITRegs, s7));
+  masm.sd(s8, StackPointer, offsetof(EnterJITRegs, s8));
+  masm.sd(s9, StackPointer, offsetof(EnterJITRegs, s9));
+  masm.sd(s10, StackPointer, offsetof(EnterJITRegs, s10));
+  masm.sd(s11, StackPointer, offsetof(EnterJITRegs, s11));
+  masm.sd(gp, StackPointer, offsetof(EnterJITRegs, gp));
+  masm.sd(fp, StackPointer, offsetof(EnterJITRegs, fp));
+  masm.sd(sp, StackPointer, offsetof(EnterJITRegs, sp));
+  masm.sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
+  masm.sd(a7, StackPointer, offsetof(EnterJITRegs, a7));
+
+  masm.fsd(fs11, StackPointer, offsetof(EnterJITRegs, fs11));
+  masm.fsd(fs10, StackPointer, offsetof(EnterJITRegs, fs10));
+  masm.fsd(fs9, StackPointer, offsetof(EnterJITRegs, fs9));
+  masm.fsd(fs8, StackPointer, offsetof(EnterJITRegs, fs8));
+  masm.fsd(fs7, StackPointer, offsetof(EnterJITRegs, fs7));
+  masm.fsd(fs6, StackPointer, offsetof(EnterJITRegs, fs6));
+  masm.fsd(fs5, StackPointer, offsetof(EnterJITRegs, fs5));
+  masm.fsd(fs4, StackPointer, offsetof(EnterJITRegs, fs4));
+  masm.fsd(fs3, StackPointer, offsetof(EnterJITRegs, fs3));
+  masm.fsd(fs2, StackPointer, offsetof(EnterJITRegs, fs2));
+  masm.fsd(fs1, StackPointer, offsetof(EnterJITRegs, fs1));
+  masm.fsd(fs0, StackPointer, offsetof(EnterJITRegs, fs0));
+}
+
+// Body of the bailout handler: builds a BailoutStack, calls the C++
+// Bailout() function, then forwards the BaselineBailoutInfo pointer (in a2)
+// to the shared bailout tail.
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+  PushBailoutFrame(masm, a0);
+
+  // Make space for Bailout's bailoutInfo outparam.
+  masm.reserveStack(sizeof(void*));
+  masm.movePtr(StackPointer, a1);
+
+  // Call the bailout function.
+  using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+  masm.setupUnalignedABICall(a2);
+  masm.passABIArg(a0);
+  masm.passABIArg(a1);
+  masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+                                CheckUnsafeCallWithABI::DontCheckOther);
+
+  // Get the bailoutInfo outparam.
+  masm.pop(a2);
+
+  // Remove both the bailout frame and the topmost Ion frame's stack.
+  masm.moveToStackPtr(FramePointer);
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+  masm.jump(bailoutTail);
+}
+
+// Generates a trampoline for calling Jit compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature; the eight arguments arrive
+// in the integer argument registers a0..a7 (IntArgReg0..IntArgReg7).
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+  AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+  enterJITOffset_ = startTrampolineCode(masm);
+
+  const Register reg_code = IntArgReg0;
+  const Register reg_argc = IntArgReg1;
+  const Register reg_argv = IntArgReg2;
+  const mozilla::DebugOnly<Register> reg_frame = IntArgReg3;
+  const Register reg_token = IntArgReg4;
+  const Register reg_chain = IntArgReg5;
+  const Register reg_values = IntArgReg6;
+  const Register reg_vp = IntArgReg7;
+
+  MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+  GeneratePrologue(masm);
+
+  // Save stack pointer as baseline frame.
+  masm.movePtr(StackPointer, FramePointer);
+
+  // Load the number of actual arguments into s3.
+  masm.unboxInt32(Address(reg_vp, 0), s3);
+
+  /***************************************************************
+  Loop over argv vector, push arguments onto stack in reverse order
+  ***************************************************************/
+
+  // if we are constructing, that also needs to include newTarget
+  JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+  {
+    Label noNewTarget;
+    masm.branchTest32(Assembler::Zero, reg_token,
+                      Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+    masm.add32(Imm32(1), reg_argc);
+
+    masm.bind(&noNewTarget);
+  }
+  JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+  // Make stack aligned: if argc is odd, reserve one extra Value of padding
+  // (s1 = -sizeof(Value) when argc & 1, else 0) before pushing arguments.
+  masm.ma_and(s2, reg_argc, Imm32(1));
+  masm.ma_sub64(s1, zero, Imm32(sizeof(Value)));
+  Label no_zero;
+  masm.ma_branch(&no_zero, Assembler::Condition::Equal, s2, Operand(0));
+  masm.mv(s1, zero);
+  masm.bind(&no_zero);
+  masm.ma_add64(StackPointer, StackPointer, s1);
+
+  masm.slli(s2, reg_argc, 3);  // Value* argv
+  masm.addPtr(reg_argv, s2);   // s2 = &argv[argc]
+  JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+  // Loop over arguments, copying them from an unknown buffer onto the Ion
+  // stack so they can be accessed from JIT'ed code.
+  Label header, footer;
+  // If there aren't any arguments, don't do anything
+  masm.ma_b(s2, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+  {
+    masm.bind(&header);
+
+    masm.subPtr(Imm32(sizeof(Value)), s2);
+    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+
+    ValueOperand value = ValueOperand(s6);
+    masm.loadValue(Address(s2, 0), value);
+    masm.storeValue(value, Address(StackPointer, 0));
+
+    masm.ma_b(s2, reg_argv, &header, Assembler::Above, ShortJump);
+  }
+  masm.bind(&footer);
+  JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+  masm.push(reg_token);
+  masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, s3, s3);
+
+  CodeLabel returnLabel;
+  Label oomReturnLabel;
+  {
+    // Handle Interpreter -> Baseline OSR.
+    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+    MOZ_ASSERT(!regs.has(FramePointer));
+    regs.take(OsrFrameReg);
+    regs.take(reg_code);
+    MOZ_ASSERT(!regs.has(ReturnReg), "ReturnReg matches reg_code");
+
+    Label notOsr;
+    masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+    Register numStackValues = reg_values;
+    regs.take(numStackValues);
+    Register scratch = regs.takeAny();
+
+    // Push return address.
+    masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+    masm.ma_li(scratch, &returnLabel);
+    masm.storePtr(scratch, Address(StackPointer, 0));
+
+    // Push previous frame pointer.
+    masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+    masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+    // Reserve frame.
+    Register framePtr = FramePointer;
+    masm.movePtr(StackPointer, framePtr);
+    masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+
+    Register framePtrScratch = regs.takeAny();
+    masm.movePtr(sp, framePtrScratch);
+
+    // Reserve space for locals and stack values.
+    masm.slli(scratch, numStackValues, 3);
+    masm.subPtr(scratch, StackPointer);
+
+    // Enter exit frame.
+    masm.reserveStack(3 * sizeof(uintptr_t));
+    masm.storePtr(
+        ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+        Address(StackPointer, 2 * sizeof(uintptr_t)));  // Frame descriptor
+    masm.storePtr(
+        zero, Address(StackPointer, sizeof(uintptr_t)));  // fake return address
+    masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+    // No GC things to mark, push a bare token.
+    masm.loadJSContext(scratch);
+    masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+    masm.reserveStack(2 * sizeof(uintptr_t));
+    masm.storePtr(framePtr,
+                  Address(StackPointer, sizeof(uintptr_t)));  // BaselineFrame
+    masm.storePtr(reg_code, Address(StackPointer, 0));        // jitcode
+
+    using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+                        uint32_t numStackValues);
+    masm.setupUnalignedABICall(scratch);
+    masm.passABIArg(framePtrScratch);  // BaselineFrame
+    masm.passABIArg(OsrFrameReg);      // InterpreterFrame
+    masm.passABIArg(numStackValues);
+    masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+        MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+    regs.add(OsrFrameReg);
+    Register jitcode = regs.takeAny();
+    masm.loadPtr(Address(StackPointer, 0), jitcode);
+    masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+    masm.freeStack(2 * sizeof(uintptr_t));
+
+    Label error;
+    masm.freeStack(ExitFrameLayout::SizeWithFooter());
+    masm.branchIfFalseBool(ReturnReg, &error);
+
+    // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+    // if profiler instrumentation is enabled.
+    {
+      Label skipProfilingInstrumentation;
+      AbsoluteAddress addressOfEnabled(
+          cx->runtime()->geckoProfiler().addressOfEnabled());
+      masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+                    &skipProfilingInstrumentation);
+      masm.profilerEnterFrame(framePtr, scratch);
+      masm.bind(&skipProfilingInstrumentation);
+    }
+
+    masm.jump(jitcode);
+
+    // OOM: load error value, discard return address and previous frame
+    // pointer and return.
+    masm.bind(&error);
+    masm.movePtr(framePtr, StackPointer);
+    masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+    masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+    masm.jump(&oomReturnLabel);
+
+    masm.bind(&notOsr);
+    // Load the scope chain in R1.
+    MOZ_ASSERT(R1.scratchReg() != reg_code);
+    masm.ma_or(R1.scratchReg(), reg_chain, zero);
+  }
+  JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+  // The call will push the return address and frame pointer on the stack, thus
+  // we check that the stack would be aligned once the call is complete.
+  masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+  // Call the function with pushing return address to stack.
+  masm.callJitNoProfiler(reg_code);
+
+  {
+    // Interpreter -> Baseline OSR will return here.
+    masm.bind(&returnLabel);
+    masm.addCodeLabel(returnLabel);
+    masm.bind(&oomReturnLabel);
+  }
+
+  // Discard arguments and padding. Set sp to the address of the EnterJITRegs
+  // on the stack.
+  masm.mov(FramePointer, StackPointer);
+
+  // Store the returned value into the vp (reg_vp was saved in the prologue).
+  masm.ld(reg_vp, StackPointer, offsetof(EnterJITRegs, a7));
+  masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+  JitSpew(JitSpew_Codegen, "__Line__: %d", __LINE__);
+  // Restore non-volatile registers and return.
+  GenerateReturn(masm, ShortJump);
+}
+
+// static
+// Profiler support: recovering the register state of a C++ entry frame is not
+// implemented for this port, so always report Nothing.
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+  return mozilla::Nothing{};
+}
+
+// Generates the invalidator thunk, entered from Ion code whose IonScript has
+// been invalidated: saves the full machine state, calls InvalidationBailout(),
+// then jumps to the shared bailout tail with the BaselineBailoutInfo in a2.
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+  AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+  invalidatorOffset_ = startTrampolineCode(masm);
+
+  // Stack has to be aligned here. If not, we will have to fix it.
+  masm.checkStackAlignment();
+
+  // Push registers such that we can access them from [base + code].
+  masm.PushRegsInMask(AllRegs);
+
+  // Pass pointer to InvalidationBailoutStack structure.
+  masm.movePtr(StackPointer, a0);
+
+  // Reserve place for BailoutInfo pointer. Two words to ensure alignment for
+  // setupAlignedABICall.
+  masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+  // Pass pointer to BailoutInfo
+  masm.movePtr(StackPointer, a1);
+
+  using Fn =
+      bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+  masm.setupAlignedABICall();
+  masm.passABIArg(a0);
+  masm.passABIArg(a1);
+  masm.callWithABI<Fn, InvalidationBailout>(
+      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+  masm.pop(a2);
+
+  // Pop the machine state and the dead frame.
+  masm.moveToStackPtr(FramePointer);
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+  masm.jump(bailoutTail);
+}
+
+// Generates the arguments rectifier, invoked when a function is called with
+// fewer actual arguments than formal parameters: pushes |undefined| for the
+// missing formals (plus alignment padding), copies the actuals and |this|
+// (and |new.target| when constructing), builds a Rectifier frame and calls
+// the target's JIT code.
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+                                            ArgumentsRectifierKind kind) {
+  // Do not erase the frame pointer in this function.
+
+  AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+  switch (kind) {
+    case ArgumentsRectifierKind::Normal:
+      argumentsRectifierOffset_ = startTrampolineCode(masm);
+      break;
+    case ArgumentsRectifierKind::TrialInlining:
+      trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+      break;
+  }
+  masm.pushReturnAddress();
+  // Caller:
+  // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
+
+  // Frame prologue.
+  //
+  // NOTE: if this changes, fix the Baseline bailout code too!
+  // See BaselineStackBuilder::calculatePrevFramePtr and
+  // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+  masm.push(FramePointer);
+  masm.mov(StackPointer, FramePointer);
+
+  // Load argc.
+  masm.loadNumActualArgs(FramePointer, s3);
+
+  Register numActArgsReg = a6;
+  Register calleeTokenReg = a7;
+  Register numArgsReg = a5;
+
+  // Load |nformals| into numArgsReg.
+  masm.loadPtr(
+      Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()),
+      calleeTokenReg);
+  masm.mov(calleeTokenReg, numArgsReg);
+  masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg);
+  masm.loadFunctionArgCount(numArgsReg, numArgsReg);
+
+  // Stash another copy in t3, since we are going to do destructive operations
+  // on numArgsReg
+  masm.mov(numArgsReg, t3);
+
+  static_assert(
+      CalleeToken_FunctionConstructing == 1,
+      "Ensure that we can use the constructing bit to count the value");
+  masm.mov(calleeTokenReg, t2);
+  masm.ma_and(t2, t2, Imm32(uint32_t(CalleeToken_FunctionConstructing)));
+
+  // Including |this|, and |new.target|, there are (|nformals| + 1 +
+  // isConstructing) arguments to push to the stack. Then we push a
+  // JitFrameLayout. We compute the padding expressed in the number of extra
+  // |undefined| values to push on the stack.
+  static_assert(
+      sizeof(JitFrameLayout) % JitStackAlignment == 0,
+      "No need to consider the JitFrameLayout for aligning the stack");
+  static_assert(
+      JitStackAlignment % sizeof(Value) == 0,
+      "Ensure that we can pad the stack by pushing extra UndefinedValue");
+
+  MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment));
+  masm.add32(
+      Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+      numArgsReg);
+  masm.add32(t2, numArgsReg);
+  masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg);
+
+  // Load the number of |undefined|s to push into t1. Subtract 1 for |this|.
+  masm.ma_sub64(t1, numArgsReg, s3);
+  masm.sub32(Imm32(1), t1);
+
+  // Caller:
+  // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- sp
+  // '--- s3 ----'
+  //
+  // Rectifier frame:
+  // [fp'] [undef] [undef] [undef] [arg2] [arg1] [this] [ [argc] [callee]
+  //                                                      [descr] [raddr] ]
+  // '-------- t1 ---------' '--- s3 ----'
+
+  // Copy number of actual arguments into numActArgsReg.
+  masm.mov(s3, numActArgsReg);
+
+  masm.moveValue(UndefinedValue(), ValueOperand(t0));
+
+  // Push undefined. (including the padding)
+  {
+    Label undefLoopTop;
+
+    masm.bind(&undefLoopTop);
+    masm.sub32(Imm32(1), t1);
+    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+    masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+
+    masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+  }
+
+  // Get the topmost argument.
+  static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+  // Get the topmost argument.
+  masm.slli(t0, s3, 3);                // t0 <- nargs * 8
+  masm.ma_add64(t1, FramePointer, t0); // t1 <- fp(saved sp) + nargs * 8
+  masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t1);
+
+  // Push arguments, |nargs| + 1 times (to include |this|).
+  masm.addPtr(Imm32(1), s3);
+  {
+    Label copyLoopTop;
+
+    masm.bind(&copyLoopTop);
+    masm.sub32(Imm32(1), s3);
+    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+    masm.loadValue(Address(t1, 0), ValueOperand(t0));
+    masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+    masm.subPtr(Imm32(sizeof(Value)), t1);
+
+    masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+  }
+
+  // if constructing, copy newTarget
+  {
+    Label notConstructing;
+
+    masm.branchTest32(Assembler::Zero, calleeTokenReg,
+                      Imm32(CalleeToken_FunctionConstructing),
+                      &notConstructing);
+
+    // thisFrame[numFormals] = prevFrame[argc]
+    ValueOperand newTarget(t0);
+
+    // Load vp[argc]. Add sizeof(Value) for |this|.
+    BaseIndex newTargetSrc(FramePointer, numActArgsReg, TimesEight,
+                           sizeof(RectifierFrameLayout) + sizeof(Value));
+    masm.loadValue(newTargetSrc, newTarget);
+
+    // Again, 1 for |this|
+    BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value));
+    masm.storeValue(newTarget, newTargetDest);
+
+    masm.bind(&notConstructing);
+  }
+
+  // Caller:
+  // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+  //
+  //
+  // Rectifier frame:
+  // [fp'] <- fp [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [ [argc]
+  // [callee] [descr] [raddr] ]
+  //
+
+  // Construct JitFrameLayout.
+  masm.push(calleeTokenReg);
+  masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, numActArgsReg,
+                                     numActArgsReg);
+
+  // Call the target function.
+  masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg);
+  switch (kind) {
+    case ArgumentsRectifierKind::Normal:
+      masm.loadJitCodeRaw(calleeTokenReg, t1);
+      argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+      break;
+    case ArgumentsRectifierKind::TrialInlining:
+      Label noBaselineScript, done;
+      masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+      masm.callJitNoProfiler(t1);
+      masm.jump(&done);
+
+      // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+      masm.bind(&noBaselineScript);
+      masm.loadJitCodeRaw(calleeTokenReg, t1);
+      masm.callJitNoProfiler(t1);
+      masm.bind(&done);
+      break;
+  }
+
+  // Frame epilogue: restore caller's sp/fp and return.
+  masm.mov(FramePointer, StackPointer);
+  masm.pop(FramePointer);
+  masm.ret();
+}
+
+// Entry point for bailouts from Ion code: records the trampoline offset and
+// emits the bailout thunk (see GenerateBailoutThunk above).
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+                                        Label* bailoutTail) {
+  AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+  bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+  GenerateBailoutThunk(masm, bailoutTail);
+}
+
+// Generates the pre-write barrier stub for GC things of the given MIRType.
+// The fast path (emitPreBarrierFastPath) filters out things that need no
+// marking and returns via abiret; the slow path saves all volatile registers
+// and calls into C++ (JitPreWriteBarrier) to mark the thing, whose address is
+// expected in PreBarrierReg (a1).
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+                                        MIRType type) {
+  AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+  uint32_t offset = startTrampolineCode(masm);
+
+  MOZ_ASSERT(PreBarrierReg == a1);
+  Register temp1 = a0;
+  Register temp2 = a2;
+  Register temp3 = a3;
+  masm.push(temp1);
+  masm.push(temp2);
+  masm.push(temp3);
+
+  Label noBarrier;
+  masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+                              &noBarrier);
+
+  // Call into C++ to mark this GC thing.
+  masm.pop(temp3);
+  masm.pop(temp2);
+  masm.pop(temp1);
+
+  LiveRegisterSet save;
+  save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+                           FloatRegisterSet(FloatRegisters::VolatileMask));
+  masm.push(ra);
+  masm.PushRegsInMask(save);
+
+  masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+  masm.setupUnalignedABICall(a2);
+  masm.passABIArg(a0);
+  masm.passABIArg(a1);
+  masm.callWithABI(JitPreWriteBarrier(type));
+
+  masm.PopRegsInMask(save);
+  masm.ret();
+
+  // Fast path: no barrier needed, restore temps and return.
+  masm.bind(&noBarrier);
+  masm.pop(temp3);
+  masm.pop(temp2);
+  masm.pop(temp1);
+  masm.abiret();
+
+  return offset;
+}
+
+// Binds the shared bailout-tail label and emits the common bailout epilogue.
+// The BaselineBailoutInfo pointer is expected in a2 (see GenerateBailoutThunk
+// and generateInvalidator).
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+                                         Label* bailoutTail) {
+  AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+  masm.bind(bailoutTail);
+  masm.generateBailoutTail(a1, a2);
+}
+
+// Generates a wrapper that adapts a call from JIT code into the C++ VM
+// function |nativeFun| described by |f|: builds an exit frame, marshals the
+// explicit arguments (and an optional outparam) per the System V RISC-V ABI,
+// performs the call, checks for failure, loads the outparam, and returns to
+// JIT code popping the wrapper's stack arguments.
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+                                   const VMFunctionData& f, DynFn nativeFun,
+                                   uint32_t* wrapperOffset) {
+  AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+  *wrapperOffset = startTrampolineCode(masm);
+
+  // Avoid conflicts with argument registers while discarding the result after
+  // the function call.
+  AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+  static_assert(
+      (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+      "Wrapper register set should be a superset of Volatile register set.");
+
+  // The context is the first argument; a0 is the first argument register.
+  Register cxreg = a0;
+  regs.take(cxreg);
+
+  // If it isn't a tail call, then the return address needs to be saved
+  if (f.expectTailCall == NonTailCall) {
+    masm.pushReturnAddress();
+  }
+
+  // Push the frame pointer to finish the exit frame, then link it up.
+  masm.Push(FramePointer);
+  masm.loadJSContext(cxreg);
+  masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+  // Save the base of the argument set stored on the stack.
+  Register argsBase = InvalidReg;
+  if (f.explicitArgs) {
+    argsBase = t1;  // Use temporary register.
+    regs.take(argsBase);
+    masm.ma_add64(argsBase, StackPointer,
+                  Imm32(ExitFrameLayout::SizeWithFooter()));
+  }
+
+  // Reserve space for the outparameter.
+  Register outReg = InvalidReg;
+  switch (f.outParam) {
+    case Type_Value:
+      outReg = regs.takeAny();
+      masm.reserveStack(sizeof(Value));
+      masm.movePtr(StackPointer, outReg);
+      break;
+
+    case Type_Handle:
+      outReg = regs.takeAny();
+      masm.PushEmptyRooted(f.outParamRootType);
+      masm.movePtr(StackPointer, outReg);
+      break;
+
+    case Type_Bool:
+    case Type_Int32:
+      outReg = regs.takeAny();
+      // Reserve 4-byte space to make stack aligned to 8-byte.
+      masm.reserveStack(2 * sizeof(int32_t));
+      masm.movePtr(StackPointer, outReg);
+      break;
+
+    case Type_Pointer:
+      outReg = regs.takeAny();
+      masm.reserveStack(sizeof(uintptr_t));
+      masm.movePtr(StackPointer, outReg);
+      break;
+
+    case Type_Double:
+      outReg = regs.takeAny();
+      masm.reserveStack(sizeof(double));
+      masm.movePtr(StackPointer, outReg);
+      break;
+
+    default:
+      MOZ_ASSERT(f.outParam == Type_Void);
+      break;
+  }
+
+  masm.setupUnalignedABICall(regs.getAny());
+  masm.passABIArg(cxreg);
+
+  size_t argDisp = 0;
+
+  // Copy any arguments.
+  for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+    switch (f.argProperties(explicitArg)) {
+      case VMFunctionData::WordByValue:
+        if (f.argPassedInFloatReg(explicitArg)) {
+          masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+        } else {
+          masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+        }
+        argDisp += sizeof(void*);
+        break;
+      case VMFunctionData::WordByRef:
+        masm.passABIArg(
+            MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+            MoveOp::GENERAL);
+        argDisp += sizeof(void*);
+        break;
+      case VMFunctionData::DoubleByValue:
+      case VMFunctionData::DoubleByRef:
+        // 128-bit values are never passed through the VM wrapper on 64-bit
+        // targets.
+        MOZ_CRASH(
+            "NYI: riscv64 callVM should not be used with 128bits values.");
+        break;
+    }
+  }
+
+  // Copy the implicit outparam, if any.
+  if (InvalidReg != outReg) {
+    masm.passABIArg(outReg);
+  }
+
+  masm.callWithABI(nativeFun, MoveOp::GENERAL,
+                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+  // Test for failure.
+  switch (f.failType()) {
+    case Type_Cell:
+      masm.branchTestPtr(Assembler::Zero, a0, a0, masm.failureLabel());
+      break;
+    case Type_Bool:
+      // Called functions return bools, which are 0/false and non-zero/true
+      masm.branchIfFalseBool(a0, masm.failureLabel());
+      break;
+    case Type_Void:
+      break;
+    default:
+      MOZ_CRASH("unknown failure kind");
+  }
+
+  // Load the outparam and free any allocated stack.
+  switch (f.outParam) {
+    case Type_Handle:
+      masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+      break;
+
+    case Type_Value:
+      masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+      masm.freeStack(sizeof(Value));
+      break;
+
+    case Type_Int32:
+      masm.load32(Address(StackPointer, 0), ReturnReg);
+      masm.freeStack(2 * sizeof(int32_t));
+      break;
+
+    case Type_Pointer:
+      masm.loadPtr(Address(StackPointer, 0), ReturnReg);
+      masm.freeStack(sizeof(uintptr_t));
+      break;
+
+    case Type_Bool:
+      masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+      masm.freeStack(2 * sizeof(int32_t));
+      break;
+
+    case Type_Double:
+      masm.fld(ReturnDoubleReg, StackPointer, 0);
+      masm.freeStack(sizeof(double));
+      break;
+
+    default:
+      MOZ_ASSERT(f.outParam == Type_Void);
+      break;
+  }
+
+  // Pop ExitFooterFrame and the frame pointer.
+  masm.leaveExitFrame(sizeof(void*));
+
+  // Return. Subtract sizeof(void*) for the frame pointer.
+  masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+                  f.explicitStackSlots() * sizeof(void*) +
+                  f.extraValuesToPop * sizeof(Value)));
+
+  return true;
+}
diff --git a/js/src/jit/riscv64/constant/Base-constant-riscv.cpp b/js/src/jit/riscv64/constant/Base-constant-riscv.cpp
new file mode 100644
index 0000000000..9658689775
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Base-constant-riscv.cpp
@@ -0,0 +1,247 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdio.h>
+
+#include "jit/riscv64/constant/Constant-riscv-c.h"
+#include "jit/riscv64/constant/Constant-riscv-d.h"
+#include "jit/riscv64/constant/Constant-riscv-f.h"
+#include "jit/riscv64/constant/Constant-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv-m.h"
+#include "jit/riscv64/constant/Constant-riscv-v.h"
+#include "jit/riscv64/constant/Constant-riscv-zicsr.h"
+#include "jit/riscv64/constant/Constant-riscv-zifencei.h"
+#include "jit/riscv64/Simulator-riscv64.h"
+namespace js {
+namespace jit {
+
+int32_t ImmBranchMaxForwardOffset(OffsetSize bits) {
+ return (1 << (bits - 1)) - 1;
+}
+
+bool InstructionBase::IsShortInstruction() const {
+ uint8_t FirstByte = *reinterpret_cast<const uint8_t*>(this);
+ return (FirstByte & 0x03) <= C2;
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRdValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcRdShift + kRvcRdBits - 1, kRvcRdShift);
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRs2Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcRs2Shift + kRvcRs2Bits - 1, kRvcRs2Shift);
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRs1sValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return 0b1000 + this->Bits(kRvcRs1sShift + kRvcRs1sBits - 1, kRvcRs1sShift);
+}
+
+template <class T>
+int InstructionGetters<T>::RvcRs2sValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return 0b1000 + this->Bits(kRvcRs2sShift + kRvcRs2sBits - 1, kRvcRs2sShift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct6Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct6Shift + kRvcFunct6Bits - 1, kRvcFunct6Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct4Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct4Shift + kRvcFunct4Bits - 1, kRvcFunct4Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct3Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct3Shift + kRvcFunct3Bits - 1, kRvcFunct3Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct2Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct2Shift + kRvcFunct2Bits - 1, kRvcFunct2Shift);
+}
+
+template <class T>
+inline int InstructionGetters<T>::RvcFunct2BValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->Bits(kRvcFunct2BShift + kRvcFunct2Bits - 1, kRvcFunct2BShift);
+}
+
+template <class T>
+uint32_t InstructionGetters<T>::Rvvzimm() const {
+ if ((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) == RO_V_VSETVLI) {
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return zimm >> kRvvZimmShift;
+ } else {
+ MOZ_ASSERT((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000)) == RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return (zimm >> kRvvZimmShift) & 0x3FF;
+ }
+}
+
+template <class T>
+uint32_t InstructionGetters<T>::Rvvuimm() const {
+ MOZ_ASSERT((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000)) == RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t uimm = Bits & kRvvUimmMask;
+ return uimm >> kRvvUimmShift;
+}
+
+template class InstructionGetters<InstructionBase>;
+#ifdef JS_SIMULATOR_RISCV64
+template class InstructionGetters<SimInstructionBase>;
+#endif
+
+OffsetSize InstructionBase::GetOffsetSize() const {
+ if (IsIllegalInstruction()) {
+ MOZ_CRASH("IllegalInstruction");
+ }
+ if (IsShortInstruction()) {
+ switch (InstructionBits() & kRvcOpcodeMask) {
+ case RO_C_J:
+ return kOffset11;
+ case RO_C_BEQZ:
+ case RO_C_BNEZ:
+ return kOffset9;
+ default:
+ MOZ_CRASH("IllegalInstruction");
+ }
+ } else {
+ switch (InstructionBits() & kBaseOpcodeMask) {
+ case BRANCH:
+ return kOffset13;
+ case JAL:
+ return kOffset21;
+ default:
+ MOZ_CRASH("IllegalInstruction");
+ }
+ }
+}
+
+InstructionBase::Type InstructionBase::InstructionType() const {
+ if (IsIllegalInstruction()) {
+ return kUnsupported;
+ }
+ // RV64C Instruction
+ if (IsShortInstruction()) {
+ switch (InstructionBits() & kRvcOpcodeMask) {
+ case RO_C_ADDI4SPN:
+ return kCIWType;
+ case RO_C_FLD:
+ case RO_C_LW:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_LD:
+#endif
+ return kCLType;
+ case RO_C_FSD:
+ case RO_C_SW:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_SD:
+#endif
+ return kCSType;
+ case RO_C_NOP_ADDI:
+ case RO_C_LI:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_ADDIW:
+#endif
+ case RO_C_LUI_ADD:
+ return kCIType;
+ case RO_C_MISC_ALU:
+ if (Bits(11, 10) != 0b11)
+ return kCBType;
+ else
+ return kCAType;
+ case RO_C_J:
+ return kCJType;
+ case RO_C_BEQZ:
+ case RO_C_BNEZ:
+ return kCBType;
+ case RO_C_SLLI:
+ case RO_C_FLDSP:
+ case RO_C_LWSP:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_LDSP:
+#endif
+ return kCIType;
+ case RO_C_JR_MV_ADD:
+ return kCRType;
+ case RO_C_FSDSP:
+ case RO_C_SWSP:
+#ifdef JS_CODEGEN_RISCV64
+ case RO_C_SDSP:
+#endif
+ return kCSSType;
+ default:
+ break;
+ }
+ } else {
+ // RISCV routine
+ switch (InstructionBits() & kBaseOpcodeMask) {
+ case LOAD:
+ return kIType;
+ case LOAD_FP:
+ return kIType;
+ case MISC_MEM:
+ return kIType;
+ case OP_IMM:
+ return kIType;
+ case AUIPC:
+ return kUType;
+ case OP_IMM_32:
+ return kIType;
+ case STORE:
+ return kSType;
+ case STORE_FP:
+ return kSType;
+ case AMO:
+ return kRType;
+ case OP:
+ return kRType;
+ case LUI:
+ return kUType;
+ case OP_32:
+ return kRType;
+ case MADD:
+ case MSUB:
+ case NMSUB:
+ case NMADD:
+ return kR4Type;
+ case OP_FP:
+ return kRType;
+ case BRANCH:
+ return kBType;
+ case JALR:
+ return kIType;
+ case JAL:
+ return kJType;
+ case SYSTEM:
+ return kIType;
+ case OP_V:
+ return kVType;
+ }
+ }
+ return kUnsupported;
+}
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/constant/Base-constant-riscv.h b/js/src/jit/riscv64/constant/Base-constant-riscv.h
new file mode 100644
index 0000000000..929ccd67b5
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Base-constant-riscv.h
@@ -0,0 +1,1057 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Base_constant_riscv__h_
+#define jit_riscv64_constant_Base_constant_riscv__h_
+namespace js {
+namespace jit {
+
+// On RISC-V Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxTracepointCode = 63;
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
+static_assert(kMaxWatchpointCode < kMaxStopCode);
+static_assert(kMaxTracepointCode < kMaxStopCode);
+
+// Debug parameters.
+//
+// For example:
+//
+//    __ Debug(TRACE_ENABLE | LOG_TRACE);
+//    starts tracing: sets v8_flags.trace-sim to true.
+//    __ Debug(TRACE_ENABLE | LOG_REGS);
+//    prints all registers.
+//    __ Debug(TRACE_DISABLE | LOG_TRACE);
+//    stops tracing: sets v8_flags.trace-sim to false.
+const uint32_t kDebuggerTracingDirectivesMask = 0b111 << 3;
+enum DebugParameters : uint32_t {
+ NO_PARAM = 1 << 5,
+ BREAK = 1 << 0,
+ LOG_TRACE = 1 << 1,
+ LOG_REGS = 1 << 2,
+ LOG_ALL = LOG_TRACE,
+ // Trace control.
+ TRACE_ENABLE = 1 << 3 | NO_PARAM,
+ TRACE_DISABLE = 1 << 4 | NO_PARAM,
+};
+// On RISCV all instructions are 32 bits, except for RVC.
+using Instr = int32_t;
+using ShortInstr = int16_t;
+typedef unsigned char byte;
+// ----- Fields offset and length.
+// RISCV constants
+const int kBaseOpcodeShift = 0;
+const int kBaseOpcodeBits = 7;
+const int kFunct7Shift = 25;
+const int kFunct7Bits = 7;
+const int kFunct5Shift = 27;
+const int kFunct5Bits = 5;
+const int kFunct3Shift = 12;
+const int kFunct3Bits = 3;
+const int kFunct2Shift = 25;
+const int kFunct2Bits = 2;
+const int kRs1Shift = 15;
+const int kRs1Bits = 5;
+const int kVs1Shift = 15;
+const int kVs1Bits = 5;
+const int kVs2Shift = 20;
+const int kVs2Bits = 5;
+const int kVdShift = 7;
+const int kVdBits = 5;
+const int kRs2Shift = 20;
+const int kRs2Bits = 5;
+const int kRs3Shift = 27;
+const int kRs3Bits = 5;
+const int kRdShift = 7;
+const int kRdBits = 5;
+const int kRlShift = 25;
+const int kAqShift = 26;
+const int kImm12Shift = 20;
+const int kImm12Bits = 12;
+const int kImm11Shift = 2;
+const int kImm11Bits = 11;
+const int kShamtShift = 20;
+const int kShamtBits = 5;
+const int kShamtWShift = 20;
+// FIXME: remove this once we have a proper way to handle the wide shift amount
+const int kShamtWBits = 6;
+const int kArithShiftShift = 30;
+const int kImm20Shift = 12;
+const int kImm20Bits = 20;
+const int kCsrShift = 20;
+const int kCsrBits = 12;
+const int kMemOrderBits = 4;
+const int kPredOrderShift = 24;
+const int kSuccOrderShift = 20;
+
+// for C extension
+const int kRvcFunct4Shift = 12;
+const int kRvcFunct4Bits = 4;
+const int kRvcFunct3Shift = 13;
+const int kRvcFunct3Bits = 3;
+const int kRvcRs1Shift = 7;
+const int kRvcRs1Bits = 5;
+const int kRvcRs2Shift = 2;
+const int kRvcRs2Bits = 5;
+const int kRvcRdShift = 7;
+const int kRvcRdBits = 5;
+const int kRvcRs1sShift = 7;
+const int kRvcRs1sBits = 3;
+const int kRvcRs2sShift = 2;
+const int kRvcRs2sBits = 3;
+const int kRvcFunct2Shift = 5;
+const int kRvcFunct2BShift = 10;
+const int kRvcFunct2Bits = 2;
+const int kRvcFunct6Shift = 10;
+const int kRvcFunct6Bits = 6;
+
+const uint32_t kRvcOpcodeMask =
+ 0b11 | (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
+const uint32_t kRvcFunct3Mask =
+ (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
+const uint32_t kRvcFunct4Mask =
+ (((1 << kRvcFunct4Bits) - 1) << kRvcFunct4Shift);
+const uint32_t kRvcFunct6Mask =
+ (((1 << kRvcFunct6Bits) - 1) << kRvcFunct6Shift);
+const uint32_t kRvcFunct2Mask =
+ (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2Shift);
+const uint32_t kRvcFunct2BMask =
+ (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2BShift);
+const uint32_t kCRTypeMask = kRvcOpcodeMask | kRvcFunct4Mask;
+const uint32_t kCSTypeMask = kRvcOpcodeMask | kRvcFunct6Mask;
+const uint32_t kCATypeMask = kRvcOpcodeMask | kRvcFunct6Mask | kRvcFunct2Mask;
+const uint32_t kRvcBImm8Mask = (((1 << 5) - 1) << 2) | (((1 << 3) - 1) << 10);
+
+// RISCV Instruction bit masks
+const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1)
+ << kBaseOpcodeShift;
+const uint32_t kFunct3Mask = ((1 << kFunct3Bits) - 1) << kFunct3Shift;
+const uint32_t kFunct5Mask = ((1 << kFunct5Bits) - 1) << kFunct5Shift;
+const uint32_t kFunct7Mask = ((1 << kFunct7Bits) - 1) << kFunct7Shift;
+const uint32_t kFunct2Mask = 0b11 << kFunct7Shift;
+const uint32_t kRTypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct7Mask;
+const uint32_t kRATypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct5Mask;
+const uint32_t kRFPTypeMask = kBaseOpcodeMask | kFunct7Mask;
+const uint32_t kR4TypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct2Mask;
+const uint32_t kITypeMask = kBaseOpcodeMask | kFunct3Mask;
+const uint32_t kSTypeMask = kBaseOpcodeMask | kFunct3Mask;
+const uint32_t kBTypeMask = kBaseOpcodeMask | kFunct3Mask;
+const uint32_t kUTypeMask = kBaseOpcodeMask;
+const uint32_t kJTypeMask = kBaseOpcodeMask;
+const uint32_t kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift;
+const uint32_t kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift;
+const uint32_t kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift;
+const uint32_t kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const uint32_t kBImm12Mask = kFunct7Mask | kRdFieldMask;
+const uint32_t kImm20Mask = ((1 << kImm20Bits) - 1) << kImm20Shift;
+const uint32_t kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift;
+const uint32_t kImm11Mask = ((1 << kImm11Bits) - 1) << kImm11Shift;
+const uint32_t kImm31_12Mask = ((1 << 20) - 1) << 12;
+const uint32_t kImm19_0Mask = ((1 << 20) - 1);
+
+// for RVV extension
+#define RVV_LMUL(V) \
+ V(m1) \
+ V(m2) \
+ V(m4) \
+ V(m8) \
+ V(RESERVERD) \
+ V(mf8) \
+ V(mf4) \
+ V(mf2)
+
+enum Vlmul {
+#define DEFINE_FLAG(name) name,
+ RVV_LMUL(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
+
+#define RVV_SEW(V) \
+ V(E8) \
+ V(E16) \
+ V(E32) \
+ V(E64)
+
+#define DEFINE_FLAG(name) name,
+enum VSew {
+ RVV_SEW(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
+
+constexpr int kRvvELEN = 64;
+constexpr int kRvvVLEN = 128;
+constexpr int kRvvSLEN = kRvvVLEN;
+const int kRvvFunct6Shift = 26;
+const int kRvvFunct6Bits = 6;
+const uint32_t kRvvFunct6Mask =
+ (((1 << kRvvFunct6Bits) - 1) << kRvvFunct6Shift);
+
+const int kRvvVmBits = 1;
+const int kRvvVmShift = 25;
+const uint32_t kRvvVmMask = (((1 << kRvvVmBits) - 1) << kRvvVmShift);
+
+const int kRvvVs2Bits = 5;
+const int kRvvVs2Shift = 20;
+const uint32_t kRvvVs2Mask = (((1 << kRvvVs2Bits) - 1) << kRvvVs2Shift);
+
+const int kRvvVs1Bits = 5;
+const int kRvvVs1Shift = 15;
+const uint32_t kRvvVs1Mask = (((1 << kRvvVs1Bits) - 1) << kRvvVs1Shift);
+
+const int kRvvRs1Bits = kRvvVs1Bits;
+const int kRvvRs1Shift = kRvvVs1Shift;
+const uint32_t kRvvRs1Mask = (((1 << kRvvRs1Bits) - 1) << kRvvRs1Shift);
+
+const int kRvvRs2Bits = 5;
+const int kRvvRs2Shift = 20;
+const uint32_t kRvvRs2Mask = (((1 << kRvvRs2Bits) - 1) << kRvvRs2Shift);
+
+const int kRvvImm5Bits = kRvvVs1Bits;
+const int kRvvImm5Shift = kRvvVs1Shift;
+const uint32_t kRvvImm5Mask = (((1 << kRvvImm5Bits) - 1) << kRvvImm5Shift);
+
+const int kRvvVdBits = 5;
+const int kRvvVdShift = 7;
+const uint32_t kRvvVdMask = (((1 << kRvvVdBits) - 1) << kRvvVdShift);
+
+const int kRvvRdBits = kRvvVdBits;
+const int kRvvRdShift = kRvvVdShift;
+const uint32_t kRvvRdMask = (((1 << kRvvRdBits) - 1) << kRvvRdShift);
+
+const int kRvvZimmBits = 11;
+const int kRvvZimmShift = 20;
+const uint32_t kRvvZimmMask = (((1 << kRvvZimmBits) - 1) << kRvvZimmShift);
+
+const int kRvvUimmShift = kRvvRs1Shift;
+const int kRvvUimmBits = kRvvRs1Bits;
+const uint32_t kRvvUimmMask = (((1 << kRvvUimmBits) - 1) << kRvvUimmShift);
+
+const int kRvvWidthBits = 3;
+const int kRvvWidthShift = 12;
+const uint32_t kRvvWidthMask = (((1 << kRvvWidthBits) - 1) << kRvvWidthShift);
+
+const int kRvvMopBits = 2;
+const int kRvvMopShift = 26;
+const uint32_t kRvvMopMask = (((1 << kRvvMopBits) - 1) << kRvvMopShift);
+
+const int kRvvMewBits = 1;
+const int kRvvMewShift = 28;
+const uint32_t kRvvMewMask = (((1 << kRvvMewBits) - 1) << kRvvMewShift);
+
+const int kRvvNfBits = 3;
+const int kRvvNfShift = 29;
+const uint32_t kRvvNfMask = (((1 << kRvvNfBits) - 1) << kRvvNfShift);
+
+const int kNopByte = 0x00000013;
+
+enum BaseOpcode : uint32_t {
+ LOAD = 0b0000011, // I form: LB LH LW LBU LHU
+ LOAD_FP = 0b0000111, // I form: FLW FLD FLQ
+ MISC_MEM = 0b0001111, // I special form: FENCE FENCE.I
+ OP_IMM = 0b0010011, // I form: ADDI SLTI SLTIU XORI ORI ANDI SLLI SRLI SRAI
+ // Note: SLLI/SRLI/SRAI I form first, then func3 001/101 => R type
+ AUIPC = 0b0010111, // U form: AUIPC
+ OP_IMM_32 = 0b0011011, // I form: ADDIW SLLIW SRLIW SRAIW
+ // Note: SRLIW SRAIW I form first, then func3 101 special shift encoding
+ STORE = 0b0100011, // S form: SB SH SW SD
+ STORE_FP = 0b0100111, // S form: FSW FSD FSQ
+ AMO = 0b0101111, // R form: All A instructions
+ OP = 0b0110011, // R: ADD SUB SLL SLT SLTU XOR SRL SRA OR AND and 32M set
+ LUI = 0b0110111, // U form: LUI
+ OP_32 = 0b0111011, // R: ADDW SUBW SLLW SRLW SRAW MULW DIVW DIVUW REMW REMUW
+ MADD = 0b1000011, // R4 type: FMADD.S FMADD.D FMADD.Q
+ MSUB = 0b1000111, // R4 type: FMSUB.S FMSUB.D FMSUB.Q
+ NMSUB = 0b1001011, // R4 type: FNMSUB.S FNMSUB.D FNMSUB.Q
+ NMADD = 0b1001111, // R4 type: FNMADD.S FNMADD.D FNMADD.Q
+ OP_FP = 0b1010011, // R type: Q ext
+ BRANCH = 0b1100011, // B form: BEQ BNE, BLT, BGE, BLTU BGEU
+ JALR = 0b1100111, // I form: JALR
+ JAL = 0b1101111, // J form: JAL
+ SYSTEM = 0b1110011, // I form: ECALL EBREAK Zicsr ext
+ OP_V = 0b1010111, // V form: RVV
+
+ // C extension
+ C0 = 0b00,
+ C1 = 0b01,
+ C2 = 0b10,
+ FUNCT2_0 = 0b00,
+ FUNCT2_1 = 0b01,
+ FUNCT2_2 = 0b10,
+ FUNCT2_3 = 0b11,
+};
+
+// ----- Emulated conditions.
+// On RISC-V we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+// Opposite conditions must be paired as odd/even numbers
+// because 'NegateCondition' function flips LSB to negate condition.
+enum RiscvCondition { // Any value < 0 is considered no_condition.
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal = 3,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
+ less = 8,
+ greater_equal = 9,
+ less_equal = 10,
+ greater = 11,
+ cc_always = 12,
+
+ // Aliases.
+ eq = equal,
+ ne = not_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
+};
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ kNoFPUCondition = -1,
+ EQ = 0x02, // Ordered and Equal
+ NE = 0x03, // Unordered or Not Equal
+ LT = 0x04, // Ordered and Less Than
+ GE = 0x05, // Ordered and Greater Than or Equal
+ LE = 0x06, // Ordered and Less Than or Equal
+ GT = 0x07, // Ordered and Greater Than
+};
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
+// ----------------------------------------------------------------------------
+// RISCV flags
+
+enum ControlStatusReg {
+ csr_fflags = 0x001, // Floating-Point Accrued Exceptions (RW)
+ csr_frm = 0x002, // Floating-Point Dynamic Rounding Mode (RW)
+ csr_fcsr = 0x003, // Floating-Point Control and Status Register (RW)
+ csr_cycle = 0xc00, // Cycle counter for RDCYCLE instruction (RO)
+ csr_time = 0xc01, // Timer for RDTIME instruction (RO)
+ csr_instret = 0xc02, // Insns-retired counter for RDINSTRET instruction (RO)
+ csr_cycleh = 0xc80, // Upper 32 bits of cycle, RV32I only (RO)
+ csr_timeh = 0xc81, // Upper 32 bits of time, RV32I only (RO)
+ csr_instreth = 0xc82 // Upper 32 bits of instret, RV32I only (RO)
+};
+
+enum FFlagsMask {
+ kInvalidOperation = 0b10000, // NV: Invalid
+ kDivideByZero = 0b1000, // DZ: Divide by Zero
+ kOverflow = 0b100, // OF: Overflow
+ kUnderflow = 0b10, // UF: Underflow
+ kInexact = 0b1 // NX: Inexact
+};
+
+enum FPURoundingMode {
+ RNE = 0b000, // Round to Nearest, ties to Even
+ RTZ = 0b001, // Round towards Zero
+ RDN = 0b010, // Round Down (towards -infinity)
+ RUP = 0b011, // Round Up (towards +infinity)
+  RMM = 0b100,  // Round to Nearest, ties to Max Magnitude
+ DYN = 0b111 // In instruction's rm field, selects dynamic rounding mode;
+ // In Rounding Mode register, Invalid
+};
+
+enum MemoryOdering {
+ PSI = 0b1000, // PI or SI
+ PSO = 0b0100, // PO or SO
+ PSR = 0b0010, // PR or SR
+ PSW = 0b0001, // PW or SW
+ PSIORW = PSI | PSO | PSR | PSW
+};
+
+const int kFloat32ExponentBias = 127;
+const int kFloat32MantissaBits = 23;
+const int kFloat32ExponentBits = 8;
+const int kFloat64ExponentBias = 1023;
+const int kFloat64MantissaBits = 52;
+const int kFloat64ExponentBits = 11;
+
+enum FClassFlag {
+ kNegativeInfinity = 1,
+ kNegativeNormalNumber = 1 << 1,
+ kNegativeSubnormalNumber = 1 << 2,
+ kNegativeZero = 1 << 3,
+ kPositiveZero = 1 << 4,
+ kPositiveSubnormalNumber = 1 << 5,
+ kPositiveNormalNumber = 1 << 6,
+ kPositiveInfinity = 1 << 7,
+ kSignalingNaN = 1 << 8,
+ kQuietNaN = 1 << 9
+};
+
+enum OffsetSize : uint32_t {
+ kOffset21 = 21, // RISCV jal
+ kOffset12 = 12, // RISCV imm12
+ kOffset20 = 20, // RISCV imm20
+ kOffset13 = 13, // RISCV branch
+ kOffset32 = 32, // RISCV auipc + instr_I
+ kOffset11 = 11, // RISCV C_J
+ kOffset9 = 9, // RISCV compressed branch
+};
+
+// The classes of immediate branch ranges, in order of increasing range.
+// Note that conditional branch and compare-and-branch forms share the same
+// range (both map to CondBranchRangeType).
+enum ImmBranchRangeType {
+ CondBranchRangeType, //
+ UncondBranchRangeType, //
+ UnknownBranchRangeType,
+
+ // Number of 'short-range' branch range types.
+ // We don't consider unconditional branches 'short-range'.
+ NumShortBranchRangeTypes = UnknownBranchRangeType
+};
+
+inline ImmBranchRangeType OffsetSizeToImmBranchRangeType(OffsetSize bits) {
+ switch (bits) {
+ case kOffset21:
+ return UncondBranchRangeType;
+ case kOffset13:
+ return CondBranchRangeType;
+ default:
+ MOZ_CRASH("Unimplement");
+ }
+}
+
+inline OffsetSize ImmBranchRangeTypeToOffsetSize(ImmBranchRangeType type) {
+ switch (type) {
+ case CondBranchRangeType:
+ return kOffset13;
+ case UncondBranchRangeType:
+ return kOffset21;
+ default:
+ MOZ_CRASH("Unimplement");
+ }
+}
+
+int32_t ImmBranchMaxForwardOffset(OffsetSize bits);
+
+inline int32_t ImmBranchMaxForwardOffset(ImmBranchRangeType type) {
+ return ImmBranchMaxForwardOffset(ImmBranchRangeTypeToOffsetSize(type));
+}
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-riscv64.cc, as they use named
+// registers and other constants.
+
+// An Illegal instruction
+const Instr kIllegalInstr = 0;  // All bits are 0s (an all-zero word is illegal)
+// An ECALL instruction, used for redirected real time call
+const Instr rtCallRedirInstr = SYSTEM; // All other bits are 0s (i.e., ecall)
+// An EBreak instruction, used for debugging and semi-hosting
+const Instr kBreakInstr = SYSTEM | 1 << kImm12Shift; // ebreak
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kShortInstrSize = 2;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
+class InstructionBase {
+ public:
+ enum {
+ // On RISC-V, PC cannot actually be directly accessed. We behave as if PC
+ // was always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Instruction type.
+ enum Type {
+ kRType,
+ kR4Type, // Special R4 for Q extension
+ kIType,
+ kSType,
+ kBType,
+ kUType,
+ kJType,
+ // C extension
+ kCRType,
+ kCIType,
+ kCSSType,
+ kCIWType,
+ kCLType,
+ kCSType,
+ kCAType,
+ kCBType,
+ kCJType,
+ // V extension
+ kVType,
+ kVLType,
+ kVSType,
+ kVAMOType,
+ kVIVVType,
+ kVFVVType,
+ kVMVVType,
+ kVIVIType,
+ kVIVXType,
+ kVFVFType,
+ kVMVXType,
+ kVSETType,
+ kUnsupported = -1
+ };
+
+ inline bool IsIllegalInstruction() const {
+ uint16_t FirstHalfWord = *reinterpret_cast<const uint16_t*>(this);
+ return FirstHalfWord == 0;
+ }
+
+ bool IsShortInstruction() const;
+
+ inline uint8_t InstructionSize() const {
+ return (this->IsShortInstruction()) ? kShortInstrSize : kInstrSize;
+ }
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ if (this->IsShortInstruction()) {
+ return 0x0000FFFF & (*reinterpret_cast<const ShortInstr*>(this));
+ }
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
+ }
+
+ // Accessors for the different named fields used in the RISC-V encoding.
+ inline BaseOpcode BaseOpcodeValue() const {
+ return static_cast<BaseOpcode>(
+ Bits(kBaseOpcodeShift + kBaseOpcodeBits - 1, kBaseOpcodeShift));
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline BaseOpcode BaseOpcodeFieldRaw() const {
+ return static_cast<BaseOpcode>(InstructionBits() & kBaseOpcodeMask);
+ }
+
+ // Safe to call within R-type instructions
+ inline int Funct7FieldRaw() const { return InstructionBits() & kFunct7Mask; }
+
+ // Safe to call within R-, I-, S-, or B-type instructions
+ inline int Funct3FieldRaw() const { return InstructionBits() & kFunct3Mask; }
+
+ // Safe to call within R-, I-, S-, or B-type instructions
+ inline int Rs1FieldRawNoAssert() const {
+ return InstructionBits() & kRs1FieldMask;
+ }
+
+ // Safe to call within R-, S-, or B-type instructions
+ inline int Rs2FieldRawNoAssert() const {
+ return InstructionBits() & kRs2FieldMask;
+ }
+
+ // Safe to call within R4-type instructions
+ inline int Rs3FieldRawNoAssert() const {
+ return InstructionBits() & kRs3FieldMask;
+ }
+
+ inline int32_t ITypeBits() const { return InstructionBits() & kITypeMask; }
+
+ inline int32_t InstructionOpcodeType() const {
+ if (IsShortInstruction()) {
+ return InstructionBits() & kRvcOpcodeMask;
+ } else {
+ return InstructionBits() & kBaseOpcodeMask;
+ }
+ }
+
+ // Get the encoding type of the instruction.
+ Type InstructionType() const;
+ OffsetSize GetOffsetSize() const;
+ inline ImmBranchRangeType GetImmBranchRangeType() const {
+ return OffsetSizeToImmBranchRangeType(GetOffsetSize());
+ }
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+ // Say if the instruction is a break or a trap.
+ bool IsTrap() const;
+
+ inline int BaseOpcode() const {
+ return this->InstructionBits() & kBaseOpcodeMask;
+ }
+
+ inline int RvcOpcode() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ return this->InstructionBits() & kRvcOpcodeMask;
+ }
+
+ inline int Rs1Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
+ return this->Bits(kRs1Shift + kRs1Bits - 1, kRs1Shift);
+ }
+
+ inline int Rs2Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
+ return this->Bits(kRs2Shift + kRs2Bits - 1, kRs2Shift);
+ }
+
+ inline int Rs3Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kR4Type);
+ return this->Bits(kRs3Shift + kRs3Bits - 1, kRs3Shift);
+ }
+
+ inline int Vs1Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs1Shift + kVs1Bits - 1, kVs1Shift);
+ }
+
+ inline int Vs2Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs2Shift + kVs2Bits - 1, kVs2Shift);
+ }
+
+ inline int VdValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVdShift + kVdBits - 1, kVdShift);
+ }
+
+ inline int RdValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kUType ||
+ this->InstructionType() == InstructionBase::kJType ||
+ this->InstructionType() == InstructionBase::kVType);
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int RvcRs1Value() const { return this->RvcRdValue(); }
+
+ int RvcRdValue() const;
+
+ int RvcRs2Value() const;
+
+ int RvcRs1sValue() const;
+
+ int RvcRs2sValue() const;
+
+ int Funct7Value() const;
+
+ inline int Funct3Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType);
+ return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift);
+ }
+
+ inline int Funct5Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kRType &&
+ this->BaseOpcode() == OP_FP);
+ return this->Bits(kFunct5Shift + kFunct5Bits - 1, kFunct5Shift);
+ }
+
+ int RvcFunct6Value() const;
+
+ int RvcFunct4Value() const;
+
+ int RvcFunct3Value() const;
+
+ int RvcFunct2Value() const;
+
+ int RvcFunct2BValue() const;
+
+ inline int CsrValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kIType &&
+ this->BaseOpcode() == SYSTEM);
+ return (this->Bits(kCsrShift + kCsrBits - 1, kCsrShift));
+ }
+
+ inline int RoundMode() const {
+ MOZ_ASSERT((this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type) &&
+ this->BaseOpcode() == OP_FP);
+ return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift);
+ }
+
+ inline int MemoryOrder(bool is_pred) const {
+ MOZ_ASSERT((this->InstructionType() == InstructionBase::kIType &&
+ this->BaseOpcode() == MISC_MEM));
+ if (is_pred) {
+ return this->Bits(kPredOrderShift + kMemOrderBits - 1, kPredOrderShift);
+ } else {
+ return this->Bits(kSuccOrderShift + kMemOrderBits - 1, kSuccOrderShift);
+ }
+ }
+
+ inline int Imm12Value() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kIType);
+ int Value = this->Bits(kImm12Shift + kImm12Bits - 1, kImm12Shift);
+ return Value << 20 >> 20;
+ }
+
+ inline int32_t Imm12SExtValue() const {
+ int32_t Value = this->Imm12Value() << 20 >> 20;
+ return Value;
+ }
+
+ inline int BranchOffset() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kBType);
+ // | imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
+ // 31 25 11 7
+ uint32_t Bits = this->InstructionBits();
+ int16_t imm13 = ((Bits & 0xf00) >> 7) | ((Bits & 0x7e000000) >> 20) |
+ ((Bits & 0x80) << 4) | ((Bits & 0x80000000) >> 19);
+ return imm13 << 19 >> 19;
+ }
+
+ inline int StoreOffset() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kSType);
+ // | imm[11:5] | rs2 | rs1 | funct3 | imm[4:0] | opcode |
+ // 31 25 11 7
+ uint32_t Bits = this->InstructionBits();
+ int16_t imm12 = ((Bits & 0xf80) >> 7) | ((Bits & 0xfe000000) >> 20);
+ return imm12 << 20 >> 20;
+ }
+
+ inline int Imm20UValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kUType);
+ // | imm[31:12] | rd | opcode |
+ // 31 12
+ int32_t Bits = this->InstructionBits();
+ return Bits >> 12;
+ }
+
+ inline int Imm20JValue() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kJType);
+ // | imm[20|10:1|11|19:12] | rd | opcode |
+ // 31 12
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm20 = ((Bits & 0x7fe00000) >> 20) | ((Bits & 0x100000) >> 9) |
+ (Bits & 0xff000) | ((Bits & 0x80000000) >> 11);
+ return imm20 << 11 >> 11;
+ }
+
+ inline bool IsArithShift() const {
+ // Valid only for right shift operations
+ MOZ_ASSERT((this->BaseOpcode() == OP || this->BaseOpcode() == OP_32 ||
+ this->BaseOpcode() == OP_IMM ||
+ this->BaseOpcode() == OP_IMM_32) &&
+ this->Funct3Value() == 0b101);
+ return this->InstructionBits() & 0x40000000;
+ }
+
+ inline int Shamt() const {
+ // Valid only for shift instructions (SLLI, SRLI, SRAI)
+ MOZ_ASSERT((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM &&
+ (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101));
+ // | 0A0000 | shamt | rs1 | funct3 | rd | opcode |
+ // 31 25 20
+ return this->Bits(kImm12Shift + 5, kImm12Shift);
+ }
+
+ inline int Shamt32() const {
+ // Valid only for shift instructions (SLLIW, SRLIW, SRAIW)
+ MOZ_ASSERT((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM_32 &&
+ (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101));
+ // | 0A00000 | shamt | rs1 | funct3 | rd | opcode |
+ // 31 24 20
+ return this->Bits(kImm12Shift + 4, kImm12Shift);
+ }
+
+ inline int RvcImm6Value() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | imm[5] | rs1/rd | imm[4:0] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm6 = ((Bits & 0x1000) >> 7) | ((Bits & 0x7c) >> 2);
+ return imm6 << 26 >> 26;
+ }
+
+ inline int RvcImm6Addi16spValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | nzimm[9] | 2 | nzimm[4|6|8:7|5] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm10 = ((Bits & 0x1000) >> 3) | ((Bits & 0x40) >> 2) |
+ ((Bits & 0x20) << 1) | ((Bits & 0x18) << 4) |
+ ((Bits & 0x4) << 3);
+ MOZ_ASSERT(imm10 != 0);
+ return imm10 << 22 >> 22;
+ }
+
+ inline int RvcImm8Addi4spnValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | nzimm[11] | rd' | opcode |
+ // 15 13 5 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t uimm10 = ((Bits & 0x20) >> 2) | ((Bits & 0x40) >> 4) |
+ ((Bits & 0x780) >> 1) | ((Bits & 0x1800) >> 7);
+ MOZ_ASSERT(uimm10 != 0);
+ return uimm10;
+ }
+
+ inline int RvcShamt6() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | nzuimm[5] | rs1/rd | nzuimm[4:0] | opcode |
+ // 15 12 6 2
+ int32_t imm6 = this->RvcImm6Value();
+ return imm6 & 0x3f;
+ }
+
+ inline int RvcImm6LwspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5] | rs1 | uimm[4:2|7:6] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 =
+ ((Bits & 0x1000) >> 7) | ((Bits & 0x70) >> 2) | ((Bits & 0xc) << 4);
+ return imm8;
+ }
+
+ inline int RvcImm6LdspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5] | rs1 | uimm[4:3|8:6] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 =
+ ((Bits & 0x1000) >> 7) | ((Bits & 0x60) >> 2) | ((Bits & 0x1c) << 4);
+ return imm9;
+ }
+
+ inline int RvcImm6SwspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5:2|7:6] | rs2 | opcode |
+ // 15 12 7
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 = ((Bits & 0x1e00) >> 7) | ((Bits & 0x180) >> 1);
+ return imm8;
+ }
+
+ inline int RvcImm6SdspValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | uimm[5:3|8:6] | rs2 | opcode |
+ // 15 12 7
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x380) >> 1);
+ return imm9;
+ }
+
+ inline int RvcImm5WValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | imm[5:3] | rs1 | imm[2|6] | rd | opcode |
+ // 15 12 10 6 4 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm7 =
+ ((Bits & 0x1c00) >> 7) | ((Bits & 0x40) >> 4) | ((Bits & 0x20) << 1);
+ return imm7;
+ }
+
+ inline int RvcImm5DValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | imm[5:3] | rs1 | imm[7:6] | rd | opcode |
+ // 15 12 10 6 4 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x60) << 1);
+ return imm8;
+ }
+
+ inline int RvcImm11CJValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+ // | funct3 | [11|4|9:8|10|6|7|3:1|5] | opcode |
+ // 15 12 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm12 = ((Bits & 0x4) << 3) | ((Bits & 0x38) >> 2) |
+ ((Bits & 0x40) << 1) | ((Bits & 0x80) >> 1) |
+ ((Bits & 0x100) << 2) | ((Bits & 0x600) >> 1) |
+ ((Bits & 0x800) >> 7) | ((Bits & 0x1000) >> 1);
+ return imm12 << 20 >> 20;
+ }
+
+ inline int RvcImm8BValue() const {
+ MOZ_ASSERT(this->IsShortInstruction());
+    // | funct3 | imm[8|4:3] | rs1' | imm[7:6|2:1|5] | opcode |
+ // 15 12 10 7 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 = ((Bits & 0x4) << 3) | ((Bits & 0x18) >> 2) |
+ ((Bits & 0x60) << 1) | ((Bits & 0xc00) >> 7) |
+ ((Bits & 0x1000) >> 4);
+ return imm9 << 23 >> 23;
+ }
+
+ inline int vl_vs_width() {
+ int width = 0;
+ if ((this->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (this->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (this->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+ }
+
+ uint32_t Rvvzimm() const;
+
+ uint32_t Rvvuimm() const;
+
+ inline uint32_t RvvVsew() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vsew = (zimm >> 3) & 0x7;
+ return vsew;
+ }
+
+ inline uint32_t RvvVlmul() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vlmul = zimm & 0x7;
+ return vlmul;
+ }
+
+ inline uint8_t RvvVM() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kRvvVmShift + kRvvVmBits - 1, kRvvVmShift);
+ }
+
+ inline const char* RvvSEW() const {
+ uint32_t vsew = this->RvvVsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* RvvLMUL() const {
+ uint32_t vlmul = this->RvvVlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+#define sext(x, len) (((int32_t)(x) << (32 - len)) >> (32 - len))
+#define zext(x, len) (((uint32_t)(x) << (32 - len)) >> (32 - len))
+
+ inline int32_t RvvSimm5() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType);
+ return sext(this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift),
+ kRvvImm5Bits);
+ }
+
+ inline uint32_t RvvUimm5() const {
+ MOZ_ASSERT(this->InstructionType() == InstructionBase::kVType);
+ uint32_t imm = this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift);
+ return zext(imm, kRvvImm5Bits);
+ }
+#undef sext
+#undef zext
+ inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); }
+
+ inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); }
+};
+
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
+ // Instructions are read of out a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ Instruction() = delete;
+ Instruction(const Instruction&) = delete;
+ Instruction& operator=(const Instruction&) = delete;
+};
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ return (this->InstructionBits() == kBreakInstr);
+}
+
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Base_constant_riscv__h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-a.h b/js/src/jit/riscv64/constant/Constant-riscv-a.h
new file mode 100644
index 0000000000..718e607240
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-a.h
@@ -0,0 +1,43 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_a_h_
+#define jit_riscv64_constant_Constant_riscv64_a_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVA : uint32_t {
+ // RV32A Standard Extension
+ RO_LR_W = AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift),
+ RO_SC_W = AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift),
+ RO_AMOSWAP_W = AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift),
+ RO_AMOADD_W = AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift),
+ RO_AMOXOR_W = AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift),
+ RO_AMOAND_W = AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift),
+ RO_AMOOR_W = AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift),
+ RO_AMOMIN_W = AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift),
+ RO_AMOMAX_W = AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift),
+ RO_AMOMINU_W = AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift),
+ RO_AMOMAXU_W = AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64A Standard Extension (in addition to RV32A)
+ RO_LR_D = AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift),
+ RO_SC_D = AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift),
+ RO_AMOSWAP_D = AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift),
+ RO_AMOADD_D = AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift),
+ RO_AMOXOR_D = AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift),
+ RO_AMOAND_D = AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift),
+ RO_AMOOR_D = AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift),
+ RO_AMOMIN_D = AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift),
+ RO_AMOMAX_D = AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift),
+ RO_AMOMINU_D = AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift),
+ RO_AMOMAXU_D = AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+#endif // JS_CODEGEN_RISCV64
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_a_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-c.h b/js/src/jit/riscv64/constant/Constant-riscv-c.h
new file mode 100644
index 0000000000..a7d4792f5f
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-c.h
@@ -0,0 +1,61 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_c_h_
+#define jit_riscv64_constant_Constant_riscv64_c_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+enum OpcodeRISCVC : uint32_t {
+
+ RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift),
+ RO_C_ADDI16SP = C1 | (0b011 << kRvcFunct3Shift),
+ RO_C_LW = C0 | (0b010 << kRvcFunct3Shift),
+ RO_C_SW = C0 | (0b110 << kRvcFunct3Shift),
+ RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift),
+ RO_C_LI = C1 | (0b010 << kRvcFunct3Shift),
+ RO_C_SUB = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
+ RO_C_XOR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
+ RO_C_OR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << kRvcFunct2Shift),
+ RO_C_AND = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift),
+ RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift),
+ RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift),
+ RO_C_J = C1 | (0b101 << kRvcFunct3Shift),
+ RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift),
+ RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift),
+ RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift),
+ RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift),
+ RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift),
+ RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift),
+ RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift),
+ RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
+
+ RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift),
+ RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift),
+ RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift),
+ RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
+#ifdef JS_CODEGEN_RISCV64
+ RO_C_LD = C0 | (0b011 << kRvcFunct3Shift),
+ RO_C_SD = C0 | (0b111 << kRvcFunct3Shift),
+ RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift),
+ RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
+ RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift),
+ RO_C_SUBW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
+ RO_C_ADDW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
+#endif
+#ifdef JS_CODEGEN_RISCV32
+ RO_C_FLWSP = C2 | (0b011 << kRvcFunct3Shift),
+ RO_C_FSWSP = C2 | (0b111 << kRvcFunct3Shift),
+ RO_C_FLW = C0 | (0b011 << kRvcFunct3Shift),
+ RO_C_FSW = C0 | (0b111 << kRvcFunct3Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Constant_riscv64_c_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-d.h b/js/src/jit/riscv64/constant/Constant-riscv-d.h
new file mode 100644
index 0000000000..d97e44ffe5
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-d.h
@@ -0,0 +1,55 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_d_h_
+#define jit_riscv64_constant_Constant_riscv64_d_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVD : uint32_t {
+ // RV32D Standard Extension
+ RO_FLD = LOAD_FP | (0b011 << kFunct3Shift),
+ RO_FSD = STORE_FP | (0b011 << kFunct3Shift),
+ RO_FMADD_D = MADD | (0b01 << kFunct2Shift),
+ RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift),
+ RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift),
+ RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift),
+ RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift),
+ RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift),
+ RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift),
+ RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift),
+ RO_FSQRT_D = OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FSGNJ_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FSGNJN_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FSQNJX_D = OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FMIN_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
+ RO_FMAX_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
+ RO_FCVT_S_D = OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FCVT_D_S = OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FEQ_D = OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FLT_D = OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FLE_D = OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FCVT_W_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_WU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FCVT_D_W = OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_D_WU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64D Standard Extension (in addition to RV32D)
+ RO_FCVT_L_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_LU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FCVT_D_L = OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_D_LU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+
+#endif  // jit_riscv64_constant_Constant_riscv64_d_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-f.h b/js/src/jit/riscv64/constant/Constant-riscv-f.h
new file mode 100644
index 0000000000..28c96394e2
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-f.h
@@ -0,0 +1,51 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_f_h_
+#define jit_riscv64_constant_Constant_riscv64_f_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVF : uint32_t {
+ // RV32F Standard Extension
+ RO_FLW = LOAD_FP | (0b010 << kFunct3Shift),
+ RO_FSW = STORE_FP | (0b010 << kFunct3Shift),
+ RO_FMADD_S = MADD | (0b00 << kFunct2Shift),
+ RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift),
+ RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift),
+ RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift),
+ RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift),
+ RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift),
+ RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift),
+ RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift),
+ RO_FSQRT_S = OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FSGNJ_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FSGNJN_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FSQNJX_S = OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FMIN_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
+ RO_FMAX_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
+ RO_FCVT_W_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_WU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) | (0b000 << kFunct3Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FEQ_S = OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FLT_S = OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FLE_S = OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FCLASS_S = OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift),
+ RO_FCVT_S_W = OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_S_WU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FMV_W_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64F Standard Extension (in addition to RV32F)
+ RO_FCVT_L_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_LU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FCVT_S_L = OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_S_LU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift),
+#endif // JS_CODEGEN_RISCV64
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_f_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-i.h b/js/src/jit/riscv64/constant/Constant-riscv-i.h
new file mode 100644
index 0000000000..586ffd8a14
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-i.h
@@ -0,0 +1,73 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_i_h_
+#define jit_riscv64_constant_Constant_riscv64_i_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCV32I : uint32_t {
+ // Note use RO (RiscV Opcode) prefix
+ // RV32I Base Instruction Set
+ RO_LUI = LUI,
+ RO_AUIPC = AUIPC,
+ RO_JAL = JAL,
+ RO_JALR = JALR | (0b000 << kFunct3Shift),
+ RO_BEQ = BRANCH | (0b000 << kFunct3Shift),
+ RO_BNE = BRANCH | (0b001 << kFunct3Shift),
+ RO_BLT = BRANCH | (0b100 << kFunct3Shift),
+ RO_BGE = BRANCH | (0b101 << kFunct3Shift),
+ RO_BLTU = BRANCH | (0b110 << kFunct3Shift),
+ RO_BGEU = BRANCH | (0b111 << kFunct3Shift),
+ RO_LB = LOAD | (0b000 << kFunct3Shift),
+ RO_LH = LOAD | (0b001 << kFunct3Shift),
+ RO_LW = LOAD | (0b010 << kFunct3Shift),
+ RO_LBU = LOAD | (0b100 << kFunct3Shift),
+ RO_LHU = LOAD | (0b101 << kFunct3Shift),
+ RO_SB = STORE | (0b000 << kFunct3Shift),
+ RO_SH = STORE | (0b001 << kFunct3Shift),
+ RO_SW = STORE | (0b010 << kFunct3Shift),
+ RO_ADDI = OP_IMM | (0b000 << kFunct3Shift),
+ RO_SLTI = OP_IMM | (0b010 << kFunct3Shift),
+ RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift),
+ RO_XORI = OP_IMM | (0b100 << kFunct3Shift),
+ RO_ORI = OP_IMM | (0b110 << kFunct3Shift),
+ RO_ANDI = OP_IMM | (0b111 << kFunct3Shift),
+ RO_SLLI = OP_IMM | (0b001 << kFunct3Shift),
+ RO_SRLI = OP_IMM | (0b101 << kFunct3Shift),
+ // RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use func7
+ RO_ADD = OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SUB = OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_SLL = OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SLT = OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SLTU = OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_XOR = OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRL = OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRA = OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_OR = OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_AND = OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift),
+ RO_ECALL = SYSTEM | (0b000 << kFunct3Shift),
+// RO_EBREAK = SYSTEM | (0b000 << kFunct3Shift), // Same as ECALL, use imm12
+
+#if JS_CODEGEN_RISCV64
+ // RV64I Base Instruction Set (in addition to RV32I)
+ RO_LWU = LOAD | (0b110 << kFunct3Shift),
+ RO_LD = LOAD | (0b011 << kFunct3Shift),
+ RO_SD = STORE | (0b011 << kFunct3Shift),
+ RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift),
+ RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift),
+ RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift),
+ // RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use func7
+ RO_ADDW = OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SUBW = OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_SLLW = OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRLW = OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRAW = OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_i_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-m.h b/js/src/jit/riscv64/constant/Constant-riscv-m.h
new file mode 100644
index 0000000000..81a69dab41
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-m.h
@@ -0,0 +1,34 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_m_h_
+#define jit_riscv64_constant_Constant_riscv64_m_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+enum OpcodeRISCVM : uint32_t {
+ // RV32M Standard Extension
+ RO_MUL = OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULH = OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULHSU = OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULHU = OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIV = OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVU = OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REM = OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMU = OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64M Standard Extension (in addition to RV32M)
+ RO_MULW = OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVW = OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVUW = OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMW = OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMUW = OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+#endif
+};
+} // namespace jit
+} // namespace js
+
+#endif  // jit_riscv64_constant_Constant_riscv64_m_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-v.h b/js/src/jit/riscv64/constant/Constant-riscv-v.h
new file mode 100644
index 0000000000..cca3540efd
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-v.h
@@ -0,0 +1,508 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_v_h_
+#define jit_riscv64_constant_Constant_riscv64_v_h_
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+
+namespace RVV {
+enum TailAgnosticType {
+ ta = 0x1, // Tail agnostic
+ tu = 0x0, // Tail undisturbed
+};
+
+enum MaskAgnosticType {
+ ma = 0x1, // Mask agnostic
+ mu = 0x0, // Mask undisturbed
+};
+enum MaskType {
+ Mask = 0x0, // use the mask
+ NoMask = 0x1,
+};
+} // namespace RVV
+
+enum OpcodeRISCVV : uint32_t {
+ // RVV Extension
+ OP_IVV = OP_V | (0b000 << kFunct3Shift),
+ OP_FVV = OP_V | (0b001 << kFunct3Shift),
+ OP_MVV = OP_V | (0b010 << kFunct3Shift),
+ OP_IVI = OP_V | (0b011 << kFunct3Shift),
+ OP_IVX = OP_V | (0b100 << kFunct3Shift),
+ OP_FVF = OP_V | (0b101 << kFunct3Shift),
+ OP_MVX = OP_V | (0b110 << kFunct3Shift),
+
+ RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31,
+ RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30,
+ RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31,
+
+ // RVV LOAD/STORE
+ RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+
+ RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift),
+  // Note: for vector loads/stores the two bits at kRvvMopShift encode the
+  // memory addressing mode (mop), and kRvvNfShift encodes the segment count.
+ RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ // RVV Vector Arithmetic Instruction
+ VADD_FUNCT6 = 0b000000,
+ RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSUB_FUNCT6 = 0b000010,
+ RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VDIVU_FUNCT6 = 0b100000,
+ RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+
+ VDIV_FUNCT6 = 0b100001,
+ RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VREMU_FUNCT6 = 0b100010,
+ RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift),
+
+ VREM_FUNCT6 = 0b100011,
+ RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHU_FUNCT6 = 0b100100,
+ RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+
+ VMUL_FUNCT6 = 0b100101,
+ RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMUL_FUNCT6 = 0b111011,
+ RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMULU_FUNCT6 = 0b111000,
+ RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHSU_FUNCT6 = 0b100110,
+ RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULH_FUNCT6 = 0b100111,
+ RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),
+
+ VWADD_FUNCT6 = 0b110001,
+ RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDU_FUNCT6 = 0b110000,
+ RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDUW_FUNCT6 = 0b110101,
+ RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+
+ VCOMPRESS_FUNCT6 = 0b010111,
+ RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift),
+
+ VSADDU_FUNCT6 = 0b100000,
+ RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VSADD_FUNCT6 = 0b100001,
+ RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUB_FUNCT6 = 0b100011,
+ RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUBU_FUNCT6 = 0b100010,
+ RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+
+ VRSUB_FUNCT6 = 0b000011,
+ RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VMINU_FUNCT6 = 0b000100,
+ RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift),
+
+ VMIN_FUNCT6 = 0b000101,
+ RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VMAXU_FUNCT6 = 0b000110,
+ RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+
+ VMAX_FUNCT6 = 0b000111,
+ RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VAND_FUNCT6 = 0b001001,
+ RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift),
+
+ VOR_FUNCT6 = 0b001010,
+ RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift),
+
+ VXOR_FUNCT6 = 0b001011,
+ RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift),
+
+ VRGATHER_FUNCT6 = 0b001100,
+ RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+
+ VMV_FUNCT6 = 0b010111,
+ RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMV_VF = OP_FVF | (VMV_FUNCT6 << kRvvFunct6Shift),
+
+ RO_V_VMERGE_VI = RO_V_VMV_VI,
+ RO_V_VMERGE_VV = RO_V_VMV_VV,
+ RO_V_VMERGE_VX = RO_V_VMV_VX,
+
+ VMSEQ_FUNCT6 = 0b011000,
+ RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMSNE_FUNCT6 = 0b011001,
+ RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLTU_FUNCT6 = 0b011010,
+ RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLT_FUNCT6 = 0b011011,
+ RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLE_FUNCT6 = 0b011101,
+ RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLEU_FUNCT6 = 0b011100,
+ RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGTU_FUNCT6 = 0b011110,
+ RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGT_FUNCT6 = 0b011111,
+ RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEUP_FUNCT6 = 0b001110,
+ RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEDOWN_FUNCT6 = 0b001111,
+ RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+
+ VSRL_FUNCT6 = 0b101000,
+ RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
+
+ VSRA_FUNCT6 = 0b101001,
+ RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift),
+
+ VSLL_FUNCT6 = 0b100101,
+ RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
+
+ VSMUL_FUNCT6 = 0b100111,
+ RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VADC_FUNCT6 = 0b010000,
+ RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift),
+
+ VMADC_FUNCT6 = 0b010001,
+ RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift),
+
+ VWXUNARY0_FUNCT6 = 0b010000,
+ VRXUNARY0_FUNCT6 = 0b010000,
+ VMUNARY0_FUNCT6 = 0b010100,
+
+ RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VID_V = 0b10001,
+
+ VXUNARY0_FUNCT6 = 0b010010,
+ RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VWFUNARY0_FUNCT6 = 0b010000,
+ RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VRFUNARY0_FUNCT6 = 0b010000,
+ RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMAXU_FUNCT6 = 0b000110,
+ RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
+ VREDMAX_FUNCT6 = 0b000111,
+ RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMINU_FUNCT6 = 0b000100,
+ RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
+ VREDMIN_FUNCT6 = 0b000101,
+ RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFUNARY0_FUNCT6 = 0b010010,
+ RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift),
+ VFUNARY1_FUNCT6 = 0b010011,
+ RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift),
+
+ VFCVT_XU_F_V = 0b00000,
+ VFCVT_X_F_V = 0b00001,
+ VFCVT_F_XU_V = 0b00010,
+ VFCVT_F_X_V = 0b00011,
+ VFWCVT_XU_F_V = 0b01000,
+ VFWCVT_X_F_V = 0b01001,
+ VFWCVT_F_XU_V = 0b01010,
+ VFWCVT_F_X_V = 0b01011,
+ VFWCVT_F_F_V = 0b01100,
+ VFNCVT_F_F_W = 0b10100,
+ VFNCVT_X_F_W = 0b10001,
+ VFNCVT_XU_F_W = 0b10000,
+
+ VFCLASS_V = 0b10000,
+ VFSQRT_V = 0b00000,
+ VFRSQRT7_V = 0b00100,
+ VFREC7_V = 0b00101,
+
+ VFADD_FUNCT6 = 0b000000,
+ RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFSUB_FUNCT6 = 0b000010,
+ RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFDIV_FUNCT6 = 0b100000,
+ RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VFMUL_FUNCT6 = 0b100100,
+ RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ VFWADD_FUNCT6 = 0b110000,
+ RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFWSUB_FUNCT6 = 0b110010,
+ RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFWADD_W_FUNCT6 = 0b110100,
+ RO_V_VFWADD_W_VV = OP_FVV | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWADD_W_VF = OP_FVF | (VFWADD_W_FUNCT6 << kRvvFunct6Shift),
+
+ VFWSUB_W_FUNCT6 = 0b110110,
+ RO_V_VFWSUB_W_VV = OP_FVV | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWSUB_W_VF = OP_FVF | (VFWSUB_W_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Reduction Instructions
+ VFWREDUSUM_FUNCT6 = 0b110001,
+ RO_V_VFWREDUSUM_VV = OP_FVV | (VFWREDUSUM_FUNCT6 << kRvvFunct6Shift),
+
+ VFWREDOSUM_FUNCT6 = 0b110011,
+ RO_V_VFWREDOSUM_VV = OP_FVV | (VFWREDOSUM_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Multiply
+ VFWMUL_FUNCT6 = 0b111000,
+ RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VMFEQ_FUNCT6 = 0b011000,
+ RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMFNE_FUNCT6 = 0b011100,
+ RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLT_FUNCT6 = 0b011011,
+ RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLE_FUNCT6 = 0b011001,
+ RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGE_FUNCT6 = 0b011111,
+ RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGT_FUNCT6 = 0b011101,
+ RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift),
+
+ VFMAX_FUNCT6 = 0b000110,
+ RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VFREDMAX_FUNCT6 = 0b0001111,
+ RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VFMIN_FUNCT6 = 0b000100,
+ RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJ_FUNCT6 = 0b001000,
+ RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJN_FUNCT6 = 0b001001,
+ RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJX_FUNCT6 = 0b001010,
+ RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
+
+ VFMADD_FUNCT6 = 0b101000,
+ RO_V_VFMADD_VV = OP_FVV | (VFMADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMADD_VF = OP_FVF | (VFMADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMADD_FUNCT6 = 0b101001,
+ RO_V_VFNMADD_VV = OP_FVV | (VFNMADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMADD_VF = OP_FVF | (VFNMADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFMSUB_FUNCT6 = 0b101010,
+ RO_V_VFMSUB_VV = OP_FVV | (VFMSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMSUB_VF = OP_FVF | (VFMSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMSUB_FUNCT6 = 0b101011,
+ RO_V_VFNMSUB_VV = OP_FVV | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMSUB_VF = OP_FVF | (VFNMSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFMACC_FUNCT6 = 0b101100,
+ RO_V_VFMACC_VV = OP_FVV | (VFMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMACC_VF = OP_FVF | (VFMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMACC_FUNCT6 = 0b101101,
+ RO_V_VFNMACC_VV = OP_FVV | (VFNMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMACC_VF = OP_FVF | (VFNMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFMSAC_FUNCT6 = 0b101110,
+ RO_V_VFMSAC_VV = OP_FVV | (VFMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMSAC_VF = OP_FVF | (VFMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ VFNMSAC_FUNCT6 = 0b101111,
+ RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ VFWMACC_FUNCT6 = 0b111100,
+ RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWNMACC_FUNCT6 = 0b111101,
+ RO_V_VFWNMACC_VV = OP_FVV | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWNMACC_VF = OP_FVF | (VFWNMACC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWMSAC_FUNCT6 = 0b111110,
+ RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ VFWNMSAC_FUNCT6 = 0b111111,
+ RO_V_VFWNMSAC_VV = OP_FVV | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFWNMSAC_VF = OP_FVF | (VFWNMSAC_FUNCT6 << kRvvFunct6Shift),
+
+ VNCLIP_FUNCT6 = 0b101111,
+ RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIP_WI = OP_IVI | (VNCLIP_FUNCT6 << kRvvFunct6Shift),
+
+ VNCLIPU_FUNCT6 = 0b101110,
+ RO_V_VNCLIPU_WV = OP_IVV | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIPU_WX = OP_IVX | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VNCLIPU_WI = OP_IVI | (VNCLIPU_FUNCT6 << kRvvFunct6Shift),
+};
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_v_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-zicsr.h b/js/src/jit/riscv64/constant/Constant-riscv-zicsr.h
new file mode 100644
index 0000000000..6fecfa3d92
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-zicsr.h
@@ -0,0 +1,30 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_zicsr_h_
+#define jit_riscv64_constant_Constant_riscv64_zicsr_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+// RISCV CSR related bit mask and shift
+const int kFcsrFlagsBits = 5;
+const uint32_t kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1;
+const int kFcsrFrmBits = 3;
+const int kFcsrFrmShift = kFcsrFlagsBits;
+const uint32_t kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;
+const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;
+const uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;
+
+enum OpcodeRISCVZICSR : uint32_t {
+ // RV32/RV64 Zicsr Standard Extension
+ RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift),
+ RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift),
+ RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift),
+ RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift),
+ RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift),
+ RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift),
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Constant_riscv64_zicsr_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv-zifencei.h b/js/src/jit/riscv64/constant/Constant-riscv-zifencei.h
new file mode 100644
index 0000000000..be01cd0ae0
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv-zifencei.h
@@ -0,0 +1,15 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_Constant_riscv64_zifencei_h_
+#define jit_riscv64_constant_Constant_riscv64_zifencei_h_
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+namespace js {
+namespace jit {
+enum OpcodeRISCVIFENCEI : uint32_t {
+ RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift),
+};
+}  // namespace jit
+} // namespace js
+#endif // jit_riscv64_constant_Constant_riscv64_zifencei_h_
diff --git a/js/src/jit/riscv64/constant/Constant-riscv64.h b/js/src/jit/riscv64/constant/Constant-riscv64.h
new file mode 100644
index 0000000000..b9b1f894e7
--- /dev/null
+++ b/js/src/jit/riscv64/constant/Constant-riscv64.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_riscv64_constant_Constant_riscv64_h
+#define jit_riscv64_constant_Constant_riscv64_h
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdio.h>
+
+#include "jit/riscv64/constant/Base-constant-riscv.h"
+#include "jit/riscv64/constant/Constant-riscv-a.h"
+#include "jit/riscv64/constant/Constant-riscv-c.h"
+#include "jit/riscv64/constant/Constant-riscv-d.h"
+#include "jit/riscv64/constant/Constant-riscv-f.h"
+#include "jit/riscv64/constant/Constant-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv-m.h"
+#include "jit/riscv64/constant/Constant-riscv-v.h"
+#include "jit/riscv64/constant/Constant-riscv-zicsr.h"
+#include "jit/riscv64/constant/Constant-riscv-zifencei.h"
+
+namespace js {
+namespace jit {
+
+// A reasonable (i.e., safe) buffer size for the disassembly of a single
+// instruction.
+const int ReasonableBufferSize = 256;
+
+// Difference between address of current opcode and value read from pc
+// register.
+static constexpr int kPcLoadDelta = 4;
+
+// Bits available for offset field in branches
+static constexpr int kBranchOffsetBits = 13;
+
+// Bits available for offset field in jump
+static constexpr int kJumpOffsetBits = 21;
+
+// Bits available for offset field in compressed jump
+static constexpr int kCJalOffsetBits = 12;
+
+// Bits available for offset field in compressed branch
+static constexpr int kCBranchOffsetBits = 9;
+
+// Max offset for b instructions with 12-bit offset field (multiple of 2)
+static constexpr int kMaxBranchOffset = (1 << (kBranchOffsetBits - 1)) - 1;
+
+static constexpr int kCBranchOffset = (1 << (kCBranchOffsetBits - 1)) - 1;
+// Max offset for jal instruction with 20-bit offset field (multiple of 2)
+static constexpr int kMaxJumpOffset = (1 << (kJumpOffsetBits - 1)) - 1;
+
+static constexpr int kCJumpOffset = (1 << (kCJalOffsetBits - 1)) - 1;
+
+static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
+
+static_assert(kCJalOffsetBits == kOffset12);
+static_assert(kCBranchOffsetBits == kOffset9);
+static_assert(kJumpOffsetBits == kOffset21);
+static_assert(kBranchOffsetBits == kOffset13);
+// Note: the V8Vector helper (kept from the original V8 code, functioning like
+// a character array with helper methods) lives in constant/util-riscv64.h.
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_constant_Constant_riscv64_h
diff --git a/js/src/jit/riscv64/constant/util-riscv64.h b/js/src/jit/riscv64/constant/util-riscv64.h
new file mode 100644
index 0000000000..089e0f3b94
--- /dev/null
+++ b/js/src/jit/riscv64/constant/util-riscv64.h
@@ -0,0 +1,82 @@
+
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_constant_util_riscv64__h_
+#define jit_riscv64_constant_util_riscv64__h_
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+namespace js {
+namespace jit {
+template <typename T>
+class V8Vector {
+ public:
+ V8Vector() : start_(nullptr), length_(0) {}
+ V8Vector(T* data, int length) : start_(data), length_(length) {
+ MOZ_ASSERT(length == 0 || (length > 0 && data != nullptr));
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ MOZ_ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ inline V8Vector<T> operator+(int offset) {
+ MOZ_ASSERT(offset < length_);
+ return V8Vector<T>(start_ + offset, length_ - offset);
+ }
+
+ private:
+ T* start_;
+ int length_;
+};
+
+template <typename T, int kSize>
+class EmbeddedVector : public V8Vector<T> {
+ public:
+ EmbeddedVector() : V8Vector<T>(buffer_, kSize) {}
+
+ explicit EmbeddedVector(T initial_value) : V8Vector<T>(buffer_, kSize) {
+ for (int i = 0; i < kSize; ++i) {
+ buffer_[i] = initial_value;
+ }
+ }
+
+ // When copying, make underlying Vector to reference our buffer.
+ EmbeddedVector(const EmbeddedVector& rhs) : V8Vector<T>(rhs) {
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ }
+
+ EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+ if (this == &rhs) return *this;
+ V8Vector<T>::operator=(rhs);
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ return *this;
+ }
+
+ private:
+ T buffer_[kSize];
+};
+
+// Helper function for printing to a Vector.
+static inline int MOZ_FORMAT_PRINTF(2, 3)
+ SNPrintF(V8Vector<char> str, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = vsnprintf(str.start(), str.length(), format, args);
+ va_end(args);
+ return result;
+}
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_constant_util_riscv64__h_
diff --git a/js/src/jit/riscv64/disasm/Disasm-riscv64.cpp b/js/src/jit/riscv64/disasm/Disasm-riscv64.cpp
new file mode 100644
index 0000000000..bd9770d074
--- /dev/null
+++ b/js/src/jit/riscv64/disasm/Disasm-riscv64.cpp
@@ -0,0 +1,2155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// disasm::NameConverter converter;
+// disasm::Disassembler d(converter);
+// for (uint8_t* pc = begin; pc < end;) {
+// disasm::EmbeddedVector<char, disasm::ReasonableBufferSize> buffer;
+// uint8_t* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+#include "jit/riscv64/disasm/Disasm-riscv64.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "jit/riscv64/Assembler-riscv64.h"
+
+namespace js {
+namespace jit {
+namespace disasm {
+
+#define UNSUPPORTED_RISCV() printf("Unsupported instruction %d.\n", __LINE__)
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter, V8Vector<char> out_buffer)
+      : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+    // Start with an empty, NUL-terminated output buffer.
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(uint8_t* instruction);
+
+  static bool IsConstantPoolAt(uint8_t* instr_ptr);
+  static int ConstantPoolSizeAt(uint8_t* instr_ptr);
+
+ private:
+  // Bottleneck functions to print into the out_buffer.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common values.
+  void PrintRegister(int reg);
+  void PrintFPURegister(int freg);
+  void PrintVRegister(int reg);
+  void PrintFPUStatusRegister(int freg);
+  void PrintRs1(Instruction* instr);
+  void PrintRs2(Instruction* instr);
+  void PrintRd(Instruction* instr);
+  void PrintUimm(Instruction* instr);
+  void PrintVs1(Instruction* instr);
+  void PrintVs2(Instruction* instr);
+  void PrintVd(Instruction* instr);
+  void PrintFRs1(Instruction* instr);
+  void PrintFRs2(Instruction* instr);
+  void PrintFRs3(Instruction* instr);
+  void PrintFRd(Instruction* instr);
+  void PrintImm12(Instruction* instr);
+  void PrintImm12X(Instruction* instr);
+  void PrintImm20U(Instruction* instr);
+  void PrintImm20J(Instruction* instr);
+  void PrintShamt(Instruction* instr);
+  void PrintShamt32(Instruction* instr);
+  void PrintRvcImm6(Instruction* instr);
+  void PrintRvcImm6U(Instruction* instr);
+  void PrintRvcImm6Addi16sp(Instruction* instr);
+  void PrintRvcShamt(Instruction* instr);
+  void PrintRvcImm6Ldsp(Instruction* instr);
+  void PrintRvcImm6Lwsp(Instruction* instr);
+  void PrintRvcImm6Sdsp(Instruction* instr);
+  void PrintRvcImm6Swsp(Instruction* instr);
+  void PrintRvcImm5W(Instruction* instr);
+  void PrintRvcImm5D(Instruction* instr);
+  void PrintRvcImm8Addi4spn(Instruction* instr);
+  void PrintRvcImm11CJ(Instruction* instr);
+  void PrintRvcImm8B(Instruction* instr);
+  void PrintRvvVm(Instruction* instr);
+  void PrintAcquireRelease(Instruction* instr);
+  void PrintBranchOffset(Instruction* instr);
+  void PrintStoreOffset(Instruction* instr);
+  void PrintCSRReg(Instruction* instr);
+  void PrintRvvSEW(Instruction* instr);
+  void PrintRvvLMUL(Instruction* instr);
+  void PrintRvvSimm5(Instruction* instr);
+  void PrintRvvUimm5(Instruction* instr);
+  void PrintRoundingMode(Instruction* instr);
+  void PrintMemoryOrder(Instruction* instr, bool is_pred);
+
+  // Each of these functions decodes one particular instruction type.
+  void DecodeRType(Instruction* instr);
+  void DecodeR4Type(Instruction* instr);
+  void DecodeRAType(Instruction* instr);
+  void DecodeRFPType(Instruction* instr);
+  void DecodeIType(Instruction* instr);
+  void DecodeSType(Instruction* instr);
+  void DecodeBType(Instruction* instr);
+  void DecodeUType(Instruction* instr);
+  void DecodeJType(Instruction* instr);
+  void DecodeCRType(Instruction* instr);
+  void DecodeCAType(Instruction* instr);
+  void DecodeCIType(Instruction* instr);
+  void DecodeCIWType(Instruction* instr);
+  void DecodeCSSType(Instruction* instr);
+  void DecodeCLType(Instruction* instr);
+  void DecodeCSType(Instruction* instr);
+  void DecodeCJType(Instruction* instr);
+  void DecodeCBType(Instruction* instr);
+
+  // Printing of instruction name.
+  void PrintInstructionName(Instruction* instr);
+  void PrintTarget(Instruction* instr);
+
+  // Handle formatting of instructions and their options.
+  // Each Format* helper returns the number of characters consumed from the
+  // formatting string.
+  int FormatRegister(Instruction* instr, const char* option);
+  int FormatFPURegisterOrRoundMode(Instruction* instr, const char* option);
+  int FormatRvcRegister(Instruction* instr, const char* option);
+  int FormatRvcImm(Instruction* instr, const char* option);
+  int FormatOption(Instruction* instr, const char* option);
+  void Format(Instruction* instr, const char* format);
+  void Unknown(Instruction* instr);
+
+  int switch_sew(Instruction* instr);
+  int switch_nf(Instruction* instr);
+
+  const disasm::NameConverter& converter_;
+  V8Vector<char> out_buffer_;
+  // Current write position within out_buffer_.
+  int out_buffer_pos_;
+
+  // Disallow copy and assign.
+  Decoder(const Decoder&) = delete;
+  void operator=(const Decoder&) = delete;
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+  // No bounds check here; callers such as Print() guarantee there is room.
+  out_buffer_[out_buffer_pos_++] = ch;
+}
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+  // Copy characters until the NUL terminator, always leaving room for the
+  // trailing '\0' that keeps the buffer a valid C string.
+  for (char c = *str; c != '\0'; c = *++str) {
+    if (out_buffer_pos_ >= out_buffer_.length() - 1) {
+      break;
+    }
+    PrintChar(c);
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+// Decodes the nf (number-of-fields) field of a vector segment memory
+// instruction (bits 31:29 per kRvvNfMask). The decoded count is the raw
+// field value plus one; returns 0 when the field is zero or unrecognized.
+int Decoder::switch_nf(Instruction* instr) {
+  int nf = 0;
+  switch (instr->InstructionBits() & kRvvNfMask) {
+    case 0x20000000:
+      nf = 2;
+      break;
+    case 0x40000000:
+      nf = 3;
+      break;
+    case 0x60000000:
+      nf = 4;
+      break;
+    case 0x80000000:
+      nf = 5;
+      break;
+    case 0xa0000000:
+      nf = 6;
+      break;
+    case 0xc0000000:
+      nf = 7;
+      break;
+    case 0xe0000000:
+      nf = 8;
+      break;
+  }
+  return nf;
+}
+
+// Decodes the element width in bits of a vector load/store from the
+// width and mew bits. Returns -1 when the instruction is not a
+// LOAD_FP/STORE_FP encoding or the width combination is reserved.
+int Decoder::switch_sew(Instruction* instr) {
+  int width = 0;
+  if ((instr->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+      (instr->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+    return -1;
+  switch (instr->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+    case 0x0:
+      width = 8;
+      break;
+    case 0x00005000:
+      width = 16;
+      break;
+    case 0x00006000:
+      width = 32;
+      break;
+    case 0x00007000:
+      width = 64;
+      break;
+    case 0x10000000:
+      width = 128;
+      break;
+    case 0x10005000:
+      width = 256;
+      break;
+    case 0x10006000:
+      width = 512;
+      break;
+    case 0x10007000:
+      width = 1024;
+      break;
+    default:
+      width = -1;
+      break;
+  }
+  return width;
+}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+  MOZ_ASSERT(format[0] == 'r');
+  // Returns the number of characters consumed from the format string.
+  switch (format[1]) {
+    case 's':  // 'rs[12]: source registers.
+      switch (format[2]) {
+        case '1':
+          PrintRegister(instr->Rs1Value());
+          return 3;
+        case '2':
+          PrintRegister(instr->Rs2Value());
+          return 3;
+      }
+      MOZ_CRASH();
+    case 'd':  // 'rd: destination register.
+      PrintRegister(instr->RdValue());
+      return 2;
+  }
+  MOZ_CRASH();
+}
+
+// Handle all FPUregister based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegisterOrRoundMode(Instruction* instr,
+                                          const char* format) {
+  MOZ_ASSERT(format[0] == 'f');
+  // Returns the number of characters consumed from the format string.
+  switch (format[1]) {
+    case 's':  // 'fs[1-3]: FPU source registers.
+      switch (format[2]) {
+        case '1':
+          PrintFPURegister(instr->Rs1Value());
+          return 3;
+        case '2':
+          PrintFPURegister(instr->Rs2Value());
+          return 3;
+        case '3':
+          PrintFPURegister(instr->Rs3Value());
+          return 3;
+      }
+      MOZ_CRASH();
+    case 'd':  // 'fd: FPU destination register.
+      PrintFPURegister(instr->RdValue());
+      return 2;
+    case 'r':  // 'frm: rounding mode.
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "frm"));
+      PrintRoundingMode(instr);
+      return 3;
+  }
+  MOZ_CRASH();
+}
+
+// Handle all C extension register based formatting in this function to reduce
+// the complexity of FormatOption.
+int Decoder::FormatRvcRegister(Instruction* instr, const char* format) {
+  MOZ_ASSERT(format[0] == 'C');
+  MOZ_ASSERT(format[1] == 'r' || format[1] == 'f');
+  // 'Cr... selects an integer register name, 'Cf... an FPU register name.
+  // A trailing 's' ('Crs1s / 'Crs2s) denotes the compressed 3-bit register
+  // fields. Returns the number of characters consumed from the format string.
+  if (format[2] == 's') {  // 'Crs[12]: Rs register.
+    if (format[3] == '1') {
+      if (format[4] == 's') {  // 'Crs1s: 3-bits register
+        int reg = instr->RvcRs1sValue();
+        if (format[1] == 'r') {
+          PrintRegister(reg);
+        } else if (format[1] == 'f') {
+          PrintFPURegister(reg);
+        }
+        return 5;
+      }
+      int reg = instr->RvcRs1Value();
+      if (format[1] == 'r') {
+        PrintRegister(reg);
+      } else if (format[1] == 'f') {
+        PrintFPURegister(reg);
+      }
+      return 4;
+    } else if (format[3] == '2') {
+      if (format[4] == 's') {  // 'Crs2s: 3-bits register
+        int reg = instr->RvcRs2sValue();
+        if (format[1] == 'r') {
+          PrintRegister(reg);
+        } else if (format[1] == 'f') {
+          PrintFPURegister(reg);
+        }
+        return 5;
+      }
+      int reg = instr->RvcRs2Value();
+      if (format[1] == 'r') {
+        PrintRegister(reg);
+      } else if (format[1] == 'f') {
+        PrintFPURegister(reg);
+      }
+      return 4;
+    }
+    MOZ_CRASH();
+  } else if (format[2] == 'd') {  // 'Crd: rd register.
+    int reg = instr->RvcRdValue();
+    if (format[1] == 'r') {
+      PrintRegister(reg);
+    } else if (format[1] == 'f') {
+      PrintFPURegister(reg);
+    }
+    return 3;
+  }
+  MOZ_CRASH();
+}
+
+// Handle all C extension immediates based formatting in this function to reduce
+// the complexity of FormatOption.
+int Decoder::FormatRvcImm(Instruction* instr, const char* format) {
+  // TODO(riscv): add other rvc imm format
+  // Dispatches on the characters after "Cimm" and returns the number of
+  // characters consumed from the format string.
+  MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm"));
+  if (format[4] == '6') {
+    if (format[5] == 'U') {
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6U"));
+      PrintRvcImm6U(instr);
+      return 6;
+    } else if (format[5] == 'A') {
+      if (format[9] == '1' && format[10] == '6') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Addi16sp"));
+        PrintRvcImm6Addi16sp(instr);
+        return 13;
+      }
+      MOZ_CRASH();
+    } else if (format[5] == 'L') {
+      if (format[6] == 'd') {
+        if (format[7] == 's') {
+          MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Ldsp"));
+          PrintRvcImm6Ldsp(instr);
+          return 9;
+        }
+      } else if (format[6] == 'w') {
+        if (format[7] == 's') {
+          MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Lwsp"));
+          PrintRvcImm6Lwsp(instr);
+          return 9;
+        }
+      }
+      MOZ_CRASH();
+    } else if (format[5] == 'S') {
+      if (format[6] == 'w') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Swsp"));
+        PrintRvcImm6Swsp(instr);
+        return 9;
+      } else if (format[6] == 'd') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm6Sdsp"));
+        PrintRvcImm6Sdsp(instr);
+        return 9;
+      }
+      MOZ_CRASH();
+    }
+    // Plain 'Cimm6.
+    PrintRvcImm6(instr);
+    return 5;
+  } else if (format[4] == '5') {
+    MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm5"));
+    if (format[5] == 'W') {
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm5W"));
+      PrintRvcImm5W(instr);
+      return 6;
+    } else if (format[5] == 'D') {
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm5D"));
+      PrintRvcImm5D(instr);
+      return 6;
+    }
+    MOZ_CRASH();
+  } else if (format[4] == '8') {
+    MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm8"));
+    if (format[5] == 'A') {
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm8Addi4spn"));
+      PrintRvcImm8Addi4spn(instr);
+      return 13;
+    } else if (format[5] == 'B') {
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm8B"));
+      PrintRvcImm8B(instr);
+      return 6;
+    }
+    MOZ_CRASH();
+  } else if (format[4] == '1') {
+    MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm1"));
+    if (format[5] == '1') {
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "Cimm11CJ"));
+      PrintRvcImm11CJ(instr);
+      return 8;
+    }
+    MOZ_CRASH();
+  }
+  MOZ_CRASH();
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+  // Dispatches on the first character(s) of the option and returns the
+  // number of characters consumed from the formatting string.
+  switch (format[0]) {
+    case 'C': {  // `C extension
+      if (format[1] == 'r' || format[1] == 'f') {
+        return FormatRvcRegister(instr, format);
+      } else if (format[1] == 'i') {
+        return FormatRvcImm(instr, format);
+      } else if (format[1] == 's') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "Cshamt"));
+        PrintRvcShamt(instr);
+        return 6;
+      }
+      MOZ_CRASH();
+    }
+    case 'c': {  // `csr: CSR registers
+      if (format[1] == 's') {
+        if (format[2] == 'r') {
+          PrintCSRReg(instr);
+          return 3;
+        }
+      }
+      MOZ_CRASH();
+    }
+    case 'i': {  // 'imm12, 'imm12x, 'imm20U, or 'imm20J: Immediates.
+      if (format[3] == '1') {
+        if (format[4] == '2') {
+          MOZ_ASSERT(STRING_STARTS_WITH(format, "imm12"));
+          if (format[5] == 'x') {
+            PrintImm12X(instr);
+            return 6;
+          }
+          PrintImm12(instr);
+          return 5;
+        }
+      } else if (format[3] == '2' && format[4] == '0') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "imm20"));
+        switch (format[5]) {
+          case 'U':
+            MOZ_ASSERT(STRING_STARTS_WITH(format, "imm20U"));
+            PrintImm20U(instr);
+            break;
+          case 'J':
+            MOZ_ASSERT(STRING_STARTS_WITH(format, "imm20J"));
+            PrintImm20J(instr);
+            break;
+        }
+        return 6;
+      }
+      MOZ_CRASH();
+    }
+    case 'o': {  // 'offB or 'offS: Offsets.
+      if (format[3] == 'B') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "offB"));
+        PrintBranchOffset(instr);
+        return 4;
+      } else if (format[3] == 'S') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "offS"));
+        PrintStoreOffset(instr);
+        return 4;
+      }
+      MOZ_CRASH();
+    }
+    case 'r': {  // 'r: registers.
+      return FormatRegister(instr, format);
+    }
+    case 'f': {  // 'f: FPUregisters or `frm
+      return FormatFPURegisterOrRoundMode(instr, format);
+    }
+    case 'a': {  // 'a: Atomic acquire and release.
+      PrintAcquireRelease(instr);
+      return 1;
+    }
+    case 'p': {  // `pre
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "pre"));
+      PrintMemoryOrder(instr, true);
+      return 3;
+    }
+    case 's': {  // 's32, 's64, 'suc, 'sew, or 'simm5.
+      if (format[1] == '3') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "s32"));
+        PrintShamt32(instr);
+        return 3;
+      } else if (format[1] == '6') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "s64"));
+        PrintShamt(instr);
+        return 3;
+      } else if (format[1] == 'u') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "suc"));
+        PrintMemoryOrder(instr, false);
+        return 3;
+      } else if (format[1] == 'e') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "sew"));
+        PrintRvvSEW(instr);
+        return 3;
+      } else if (format[1] == 'i') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "simm5"));
+        PrintRvvSimm5(instr);
+        return 5;
+      }
+      MOZ_CRASH();
+    }
+    case 'v': {  // 'vd, 'vs1, 'vs2, or 'vm: RVV operands.
+      if (format[1] == 'd') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "vd"));
+        PrintVd(instr);
+        return 2;
+      } else if (format[2] == '1') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "vs1"));
+        PrintVs1(instr);
+        return 3;
+      } else if (format[2] == '2') {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "vs2"));
+        PrintVs2(instr);
+        return 3;
+      } else {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "vm"));
+        PrintRvvVm(instr);
+        return 2;
+      }
+    }
+    case 'l': {  // 'lmul: RVV register grouping.
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "lmul"));
+      PrintRvvLMUL(instr);
+      return 4;
+    }
+    case 'u': {  // 'uimm5 or 'uimm.
+      if (STRING_STARTS_WITH(format, "uimm5")) {
+        PrintRvvUimm5(instr);
+        return 5;
+      } else {
+        MOZ_ASSERT(STRING_STARTS_WITH(format, "uimm"));
+        PrintUimm(instr);
+        return 4;
+      }
+    }
+    case 't': {  // 'target: target of branch instructions'
+      MOZ_ASSERT(STRING_STARTS_WITH(format, "target"));
+      PrintTarget(instr);
+      return 6;
+    }
+  }
+  MOZ_CRASH();
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+  // Copy the pattern into the output buffer, expanding each quote-escaped
+  // option via FormatOption, and keep the buffer NUL-terminated.
+  for (char c = *format++; c != 0; c = *format++) {
+    if (out_buffer_pos_ >= out_buffer_.length() - 1) {
+      break;
+    }
+    if (c == '\'') {  // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = c;
+    }
+  }
+  out_buffer_[out_buffer_pos_] = '\0';
+}
+
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+ if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+ }
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" of the instruction bits.
+void Decoder::Unknown(Instruction* instr) {
+  // Emit a generic marker for instruction bits we cannot decode.
+  Format(instr, "unknown");
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  // Resolve the name through the active converter, then append it.
+  const char* name = converter_.NameOfCPURegister(reg);
+  Print(name);
+}
+
+// Vector register names are not implemented yet; reports as unsupported.
+void Decoder::PrintVRegister(int reg) { UNSUPPORTED_RISCV(); }
+
+void Decoder::PrintRs1(Instruction* instr) {
+  // Integer register in the rs1 field.
+  PrintRegister(instr->Rs1Value());
+}
+
+void Decoder::PrintRs2(Instruction* instr) {
+  // Integer register in the rs2 field.
+  PrintRegister(instr->Rs2Value());
+}
+
+void Decoder::PrintRd(Instruction* instr) {
+  // Integer register in the rd field.
+  PrintRegister(instr->RdValue());
+}
+
+void Decoder::PrintUimm(Instruction* instr) {
+  // Unsigned immediate carried in the rs1 field, printed in hex.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", instr->Rs1Value());
+}
+
+void Decoder::PrintVs1(Instruction* instr) {
+  // Vector register in the vs1 field.
+  PrintVRegister(instr->Vs1Value());
+}
+
+void Decoder::PrintVs2(Instruction* instr) {
+  // Vector register in the vs2 field.
+  PrintVRegister(instr->Vs2Value());
+}
+
+void Decoder::PrintVd(Instruction* instr) {
+  // Vector register in the vd field.
+  PrintVRegister(instr->VdValue());
+}
+
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+  // Resolve the FPU register name through the active converter.
+  const char* name = converter_.NameOfXMMRegister(freg);
+  Print(name);
+}
+
+void Decoder::PrintFRs1(Instruction* instr) {
+  // FPU register in the rs1 field.
+  PrintFPURegister(instr->Rs1Value());
+}
+
+void Decoder::PrintFRs2(Instruction* instr) {
+  // FPU register in the rs2 field.
+  PrintFPURegister(instr->Rs2Value());
+}
+
+void Decoder::PrintFRs3(Instruction* instr) {
+  // FPU register in the rs3 field.
+  PrintFPURegister(instr->Rs3Value());
+}
+
+void Decoder::PrintFRd(Instruction* instr) {
+  // FPU register in the rd field.
+  PrintFPURegister(instr->RdValue());
+}
+
+void Decoder::PrintImm12X(Instruction* instr) {
+  // 12-bit immediate, printed in hex.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", instr->Imm12Value());
+}
+
+void Decoder::PrintImm12(Instruction* instr) {
+  // 12-bit immediate, printed in decimal.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->Imm12Value());
+}
+
+void Decoder::PrintTarget(Instruction* instr) {
+  // Currently a no-op: the auipc+jalr target resolution below is disabled
+  // (it depends on Assembler helpers); the 'target option prints nothing.
+  // if (Assembler::IsJalr(instr->InstructionBits())) {
+  //   if (Assembler::IsAuipc((instr - 4)->InstructionBits()) &&
+  //       (instr - 4)->RdValue() == instr->Rs1Value()) {
+  //     int32_t imm = Assembler::BrachlongOffset((instr -
+  //     4)->InstructionBits(),
+  //                                              instr->InstructionBits());
+  //     const char* target =
+  //         converter_.NameOfAddress(reinterpret_cast<byte*>(instr - 4) + imm);
+  //     out_buffer_pos_ +=
+  //         SNPrintF(out_buffer_ + out_buffer_pos_, " -> %s", target);
+  //     return;
+  //   }
+  // }
+}
+
+void Decoder::PrintBranchOffset(Instruction* instr) {
+  // Print the branch displacement and the symbolic address it reaches.
+  const int32_t offset = instr->BranchOffset();
+  const char* target =
+      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + offset);
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d -> %s", offset, target);
+}
+
+void Decoder::PrintStoreOffset(Instruction* instr) {
+  // S-type store displacement, printed in decimal.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->StoreOffset());
+}
+
+void Decoder::PrintRvvSEW(Instruction* instr) {
+  // Selected element width, already rendered as a string by the instruction.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%s", instr->RvvSEW());
+}
+
+void Decoder::PrintRvvLMUL(Instruction* instr) {
+  // Register-group multiplier, already rendered as a string.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%s", instr->RvvLMUL());
+}
+
+void Decoder::PrintRvvSimm5(Instruction* instr) {
+  // Signed 5-bit vector immediate.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvvSimm5());
+}
+
+void Decoder::PrintRvvUimm5(Instruction* instr) {
+  // Unsigned 5-bit vector immediate.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%u", instr->RvvUimm5());
+}
+
+void Decoder::PrintImm20U(Instruction* instr) {
+  // U-type 20-bit immediate, printed in hex.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", instr->Imm20UValue());
+}
+
+void Decoder::PrintImm20J(Instruction* instr) {
+  // J-type jump displacement plus the symbolic address it reaches.
+  const int32_t offset = instr->Imm20JValue();
+  const char* target =
+      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + offset);
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d -> %s", offset, target);
+}
+
+void Decoder::PrintShamt(Instruction* instr) {
+  // 64-bit shift amount.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->Shamt());
+}
+
+void Decoder::PrintShamt32(Instruction* instr) {
+  // 32-bit shift amount.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->Shamt32());
+}
+
+void Decoder::PrintRvcImm6(Instruction* instr) {
+  // Compressed 6-bit immediate, printed in decimal.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm6Value());
+}
+
+void Decoder::PrintRvcImm6U(Instruction* instr) {
+  // Compressed 6-bit immediate masked to 20 bits (c.lui form), in hex.
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x",
+                              instr->RvcImm6Value() & 0xFFFFF);
+}
+
+void Decoder::PrintRvcImm6Addi16sp(Instruction* instr) {
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
+                              instr->RvcImm6Addi16spValue());
+}
+
+void Decoder::PrintRvcShamt(Instruction* instr) {
+  // Compressed 6-bit shift amount.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcShamt6());
+}
+
+void Decoder::PrintRvcImm6Ldsp(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm6LdspValue());
+}
+
+void Decoder::PrintRvcImm6Lwsp(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm6LwspValue());
+}
+
+void Decoder::PrintRvcImm6Swsp(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm6SwspValue());
+}
+
+void Decoder::PrintRvcImm6Sdsp(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm6SdspValue());
+}
+
+void Decoder::PrintRvcImm5W(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm5WValue());
+}
+
+void Decoder::PrintRvcImm5D(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm5DValue());
+}
+
+void Decoder::PrintRvcImm8Addi4spn(Instruction* instr) {
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
+                              instr->RvcImm8Addi4spnValue());
+}
+
+void Decoder::PrintRvcImm11CJ(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm11CJValue());
+}
+
+void Decoder::PrintRvcImm8B(Instruction* instr) {
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->RvcImm8BValue());
+}
+
+void Decoder::PrintRvvVm(Instruction* instr) {
+  uint8_t imm = instr->RvvVM();
+  // vm == 0 means the operation is masked; append the " v0.t" mask operand.
+  if (imm == 0) {
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " v0.t");
+  }
+}
+
+// Prints the ".aq"/".rl"/".aqrl" ordering suffix of an atomic instruction;
+// prints nothing when neither bit is set.
+void Decoder::PrintAcquireRelease(Instruction* instr) {
+  bool aq = instr->AqValue();
+  bool rl = instr->RlValue();
+  if (aq || rl) {
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ".");
+  }
+  if (aq) {
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "aq");
+  }
+  if (rl) {
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "rl");
+  }
+}
+
+// Prints a symbolic name for the CSR addressed by a Zicsr instruction.
+// Crashes on CSR numbers outside the small known set below.
+void Decoder::PrintCSRReg(Instruction* instr) {
+  int32_t csr_reg = instr->CsrValue();
+  std::string s;
+  switch (csr_reg) {
+    case csr_fflags:  // Floating-Point Accrued Exceptions (RW)
+      s = "csr_fflags";
+      break;
+    case csr_frm:  // Floating-Point Dynamic Rounding Mode (RW)
+      s = "csr_frm";
+      break;
+    case csr_fcsr:  // Floating-Point Control and Status Register (RW)
+      s = "csr_fcsr";
+      break;
+    case csr_cycle:
+      s = "csr_cycle";
+      break;
+    case csr_time:
+      s = "csr_time";
+      break;
+    case csr_instret:
+      s = "csr_instret";
+      break;
+    case csr_cycleh:
+      s = "csr_cycleh";
+      break;
+    case csr_timeh:
+      s = "csr_timeh";
+      break;
+    case csr_instreth:
+      s = "csr_instreth";
+      break;
+    default:
+      MOZ_CRASH();
+  }
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+// Prints the floating-point rounding-mode field (frm) symbolically;
+// crashes on the two reserved encodings.
+void Decoder::PrintRoundingMode(Instruction* instr) {
+  int frm = instr->RoundMode();
+  std::string s;
+  switch (frm) {
+    case RNE:
+      s = "RNE";
+      break;
+    case RTZ:
+      s = "RTZ";
+      break;
+    case RDN:
+      s = "RDN";
+      break;
+    case RUP:
+      s = "RUP";
+      break;
+    case RMM:
+      s = "RMM";
+      break;
+    case DYN:
+      s = "DYN";
+      break;
+    default:
+      MOZ_CRASH();
+  }
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+// Prints a FENCE ordering set as a subset of "iorw"; is_pred selects the
+// predecessor set, otherwise the successor set is printed.
+void Decoder::PrintMemoryOrder(Instruction* instr, bool is_pred) {
+  int memOrder = instr->MemoryOrder(is_pred);
+  std::string s;
+  if ((memOrder & PSI) == PSI) {
+    s += "i";
+  }
+  if ((memOrder & PSO) == PSO) {
+    s += "o";
+  }
+  if ((memOrder & PSR) == PSR) {
+    s += "r";
+  }
+  if ((memOrder & PSW) == PSW) {
+    s += "w";
+  }
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+// Printing of instruction name.
+// Intentionally empty: mnemonics are emitted as part of Format() patterns.
+void Decoder::PrintInstructionName(Instruction* instr) {}
+
+// RISCV Instruction Decode Routine
+void Decoder::DecodeRType(Instruction* instr) {
+  // Dispatch on funct7|funct3|opcode; pseudo-instruction spellings (neg,
+  // sltz, sgtz, snez, negw) are emitted when an operand is the zero register.
+  switch (instr->InstructionBits() & kRTypeMask) {
+    case RO_ADD:
+      Format(instr, "add       'rd, 'rs1, 'rs2");
+      break;
+
+// Decodes an A-extension (atomic) instruction; the mnemonic is selected by
+// funct5 (plus width), and 'a appends the .aq/.rl ordering suffix.
+void Decoder::DecodeRAType(Instruction* instr) {
+  // TODO(riscv): Add macro for RISCV A extension
+  // Special handling for A extension instructions because it uses func5
+  // For all A extension instruction, V8 simulator is pure sequential. No
+  // Memory address lock or other synchronization behaviors.
+  switch (instr->InstructionBits() & kRATypeMask) {
+    case RO_LR_W:
+      Format(instr, "lr.w'a    'rd, ('rs1)");
+      break;
+    case RO_SC_W:
+      Format(instr, "sc.w'a    'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOSWAP_W:
+      Format(instr, "amoswap.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOADD_W:
+      Format(instr, "amoadd.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOXOR_W:
+      Format(instr, "amoxor.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOAND_W:
+      Format(instr, "amoand.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOOR_W:
+      Format(instr, "amoor.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMIN_W:
+      Format(instr, "amomin.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMAX_W:
+      Format(instr, "amomax.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMINU_W:
+      Format(instr, "amominu.w'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMAXU_W:
+      Format(instr, "amomaxu.w'a 'rd, 'rs2, ('rs1)");
+      break;
+#ifdef JS_CODEGEN_RISCV64
+    case RO_LR_D:
+      Format(instr, "lr.d'a 'rd, ('rs1)");
+      break;
+    case RO_SC_D:
+      Format(instr, "sc.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOSWAP_D:
+      Format(instr, "amoswap.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOADD_D:
+      Format(instr, "amoadd.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOXOR_D:
+      Format(instr, "amoxor.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOAND_D:
+      Format(instr, "amoand.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOOR_D:
+      Format(instr, "amoor.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMIN_D:
+      Format(instr, "amomin.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMAX_D:
+      // Fixed: previously printed "amoswap.d" (copy/paste error); this case
+      // decodes amomax.d.
+      Format(instr, "amomax.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMINU_D:
+      Format(instr, "amominu.d'a 'rd, 'rs2, ('rs1)");
+      break;
+    case RO_AMOMAXU_D:
+      Format(instr, "amomaxu.d'a 'rd, 'rs2, ('rs1)");
+      break;
+#endif /*JS_CODEGEN_RISCV64*/
+    // TODO(riscv): End Add macro for RISCV A extension
+    default: {
+      UNSUPPORTED_RISCV();
+    }
+  }
+}
+
+void Decoder::DecodeRFPType(Instruction* instr) {
+  // Disassembles OP_FP (single/double precision) instructions. OP_FP
+  // encodings are selected by funct7 first; some are further refined by
+  // funct3 and/or the rs2 field.
+
+  // kRFPTypeMask covers only the funct7 bits.
+  switch (instr->InstructionBits() & kRFPTypeMask) {
+    // TODO(riscv): Add macro for RISCV F extension
+    case RO_FADD_S:
+      Format(instr, "fadd.s 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FSUB_S:
+      Format(instr, "fsub.s 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FMUL_S:
+      Format(instr, "fmul.s 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FDIV_S:
+      Format(instr, "fdiv.s 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FSQRT_S:
+      Format(instr, "fsqrt.s 'fd, 'fs1");
+      break;
+    case RO_FSGNJ_S: {  // RO_FSGNJN_S RO_FSGNJX_S
+      // Sign-injection with rs1 == rs2 is the canonical encoding of the
+      // fmv/fneg/fabs pseudo-instructions.
+      switch (instr->Funct3Value()) {
+        case 0b000:  // RO_FSGNJ_S
+          if (instr->Rs1Value() == instr->Rs2Value())
+            Format(instr, "fmv.s 'fd, 'fs1");
+          else
+            Format(instr, "fsgnj.s 'fd, 'fs1, 'fs2");
+          break;
+        case 0b001:  // RO_FSGNJN_S
+          if (instr->Rs1Value() == instr->Rs2Value())
+            Format(instr, "fneg.s 'fd, 'fs1");
+          else
+            Format(instr, "fsgnjn.s 'fd, 'fs1, 'fs2");
+          break;
+        case 0b010:  // RO_FSGNJX_S
+          if (instr->Rs1Value() == instr->Rs2Value())
+            Format(instr, "fabs.s 'fd, 'fs1");
+          else
+            Format(instr, "fsgnjx.s 'fd, 'fs1, 'fs2");
+          break;
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FMIN_S: {  // RO_FMAX_S
+      switch (instr->Funct3Value()) {
+        case 0b000:  // RO_FMIN_S
+          Format(instr, "fmin.s 'fd, 'fs1, 'fs2");
+          break;
+        case 0b001:  // RO_FMAX_S
+          Format(instr, "fmax.s 'fd, 'fs1, 'fs2");
+          break;
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FCVT_W_S: {  // RO_FCVT_WU_S , 64F RO_FCVT_L_S RO_FCVT_LU_S
+      // The conversion target type is encoded in the rs2 field.
+      switch (instr->Rs2Value()) {
+        case 0b00000:  // RO_FCVT_W_S
+          Format(instr, "fcvt.w.s ['frm] 'rd, 'fs1");
+          break;
+        case 0b00001:  // RO_FCVT_WU_S
+          Format(instr, "fcvt.wu.s ['frm] 'rd, 'fs1");
+          break;
+#ifdef JS_CODEGEN_RISCV64
+        case 0b00010:  // RO_FCVT_L_S
+          Format(instr, "fcvt.l.s ['frm] 'rd, 'fs1");
+          break;
+        case 0b00011:  // RO_FCVT_LU_S
+          Format(instr, "fcvt.lu.s ['frm] 'rd, 'fs1");
+          break;
+#endif /* JS_CODEGEN_RISCV64 */
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FMV: {  // RO_FCLASS_S
+      if (instr->Rs2Value() != 0b00000) {
+        UNSUPPORTED_RISCV();
+      }
+      switch (instr->Funct3Value()) {
+        case 0b000:  // RO_FMV_X_W
+          Format(instr, "fmv.x.w 'rd, 'fs1");
+          break;
+        case 0b001:  // RO_FCLASS_S
+          Format(instr, "fclass.s 'rd, 'fs1");
+          break;
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FLE_S: {  // RO_FEQ_S RO_FLT_S RO_FLE_S
+      switch (instr->Funct3Value()) {
+        case 0b010:  // RO_FEQ_S
+          Format(instr, "feq.s 'rd, 'fs1, 'fs2");
+          break;
+        case 0b001:  // RO_FLT_S
+          Format(instr, "flt.s 'rd, 'fs1, 'fs2");
+          break;
+        case 0b000:  // RO_FLE_S
+          Format(instr, "fle.s 'rd, 'fs1, 'fs2");
+          break;
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FCVT_S_W: {  // RO_FCVT_S_WU , 64F RO_FCVT_S_L RO_FCVT_S_LU
+      switch (instr->Rs2Value()) {
+        case 0b00000:  // RO_FCVT_S_W
+          Format(instr, "fcvt.s.w 'fd, 'rs1");
+          break;
+        case 0b00001:  // RO_FCVT_S_WU
+          Format(instr, "fcvt.s.wu 'fd, 'rs1");
+          break;
+#ifdef JS_CODEGEN_RISCV64
+        case 0b00010:  // RO_FCVT_S_L
+          Format(instr, "fcvt.s.l 'fd, 'rs1");
+          break;
+        case 0b00011:  // RO_FCVT_S_LU
+          Format(instr, "fcvt.s.lu 'fd, 'rs1");
+          break;
+#endif /* JS_CODEGEN_RISCV64 */
+        default: {
+          UNSUPPORTED_RISCV();
+        }
+      }
+      break;
+    }
+    case RO_FMV_W_X: {
+      if (instr->Funct3Value() == 0b000) {
+        Format(instr, "fmv.w.x 'fd, 'rs1");
+      } else {
+        UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    // TODO(riscv): Add macro for RISCV D extension
+    case RO_FADD_D:
+      Format(instr, "fadd.d 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FSUB_D:
+      Format(instr, "fsub.d 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FMUL_D:
+      Format(instr, "fmul.d 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FDIV_D:
+      Format(instr, "fdiv.d 'fd, 'fs1, 'fs2");
+      break;
+    case RO_FSQRT_D: {
+      if (instr->Rs2Value() == 0b00000) {
+        Format(instr, "fsqrt.d 'fd, 'fs1");
+      } else {
+        UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FSGNJ_D: {  // RO_FSGNJN_D RO_FSGNJX_D
+      // Same pseudo-instruction folding as the single-precision case.
+      switch (instr->Funct3Value()) {
+        case 0b000:  // RO_FSGNJ_D
+          if (instr->Rs1Value() == instr->Rs2Value())
+            Format(instr, "fmv.d 'fd, 'fs1");
+          else
+            Format(instr, "fsgnj.d 'fd, 'fs1, 'fs2");
+          break;
+        case 0b001:  // RO_FSGNJN_D
+          if (instr->Rs1Value() == instr->Rs2Value())
+            Format(instr, "fneg.d 'fd, 'fs1");
+          else
+            Format(instr, "fsgnjn.d 'fd, 'fs1, 'fs2");
+          break;
+        case 0b010:  // RO_FSGNJX_D
+          if (instr->Rs1Value() == instr->Rs2Value())
+            Format(instr, "fabs.d 'fd, 'fs1");
+          else
+            Format(instr, "fsgnjx.d 'fd, 'fs1, 'fs2");
+          break;
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FMIN_D: {  // RO_FMAX_D
+      switch (instr->Funct3Value()) {
+        case 0b000:  // RO_FMIN_D
+          Format(instr, "fmin.d 'fd, 'fs1, 'fs2");
+          break;
+        case 0b001:  // RO_FMAX_D
+          Format(instr, "fmax.d 'fd, 'fs1, 'fs2");
+          break;
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case (RO_FCVT_S_D & kRFPTypeMask): {
+      // fcvt.s.d shares funct7 with other conversions; rs2 selects it.
+      if (instr->Rs2Value() == 0b00001) {
+        Format(instr, "fcvt.s.d ['frm] 'fd, 'fs1");
+      } else {
+        UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FCVT_D_S: {
+      if (instr->Rs2Value() == 0b00000) {
+        Format(instr, "fcvt.d.s 'fd, 'fs1");
+      } else {
+        UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FLE_D: {  // RO_FEQ_D RO_FLT_D RO_FLE_D
+      switch (instr->Funct3Value()) {
+        case 0b010:  // RO_FEQ_D (comment fixed; it previously said RO_FEQ_S)
+          Format(instr, "feq.d 'rd, 'fs1, 'fs2");
+          break;
+        case 0b001:  // RO_FLT_D
+          Format(instr, "flt.d 'rd, 'fs1, 'fs2");
+          break;
+        case 0b000:  // RO_FLE_D
+          Format(instr, "fle.d 'rd, 'fs1, 'fs2");
+          break;
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case (RO_FCLASS_D & kRFPTypeMask): {  // RO_FCLASS_D , 64D RO_FMV_X_D
+      if (instr->Rs2Value() != 0b00000) {
+        UNSUPPORTED_RISCV();
+        break;
+      }
+      switch (instr->Funct3Value()) {
+        case 0b001:  // RO_FCLASS_D
+          Format(instr, "fclass.d 'rd, 'fs1");
+          break;
+#ifdef JS_CODEGEN_RISCV64
+        case 0b000:  // RO_FMV_X_D
+          Format(instr, "fmv.x.d 'rd, 'fs1");
+          break;
+#endif /* JS_CODEGEN_RISCV64 */
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FCVT_W_D: {  // RO_FCVT_WU_D , 64F RO_FCVT_L_D RO_FCVT_LU_D
+      switch (instr->Rs2Value()) {
+        case 0b00000:  // RO_FCVT_W_D
+          Format(instr, "fcvt.w.d ['frm] 'rd, 'fs1");
+          break;
+        case 0b00001:  // RO_FCVT_WU_D
+          Format(instr, "fcvt.wu.d ['frm] 'rd, 'fs1");
+          break;
+#ifdef JS_CODEGEN_RISCV64
+        case 0b00010:  // RO_FCVT_L_D
+          Format(instr, "fcvt.l.d ['frm] 'rd, 'fs1");
+          break;
+        case 0b00011:  // RO_FCVT_LU_D
+          Format(instr, "fcvt.lu.d ['frm] 'rd, 'fs1");
+          break;
+#endif /* JS_CODEGEN_RISCV64 */
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    case RO_FCVT_D_W: {  // RO_FCVT_D_WU , 64F RO_FCVT_D_L RO_FCVT_D_LU
+      switch (instr->Rs2Value()) {
+        case 0b00000:  // RO_FCVT_D_W
+          Format(instr, "fcvt.d.w 'fd, 'rs1");
+          break;
+        case 0b00001:  // RO_FCVT_D_WU
+          Format(instr, "fcvt.d.wu 'fd, 'rs1");
+          break;
+#ifdef JS_CODEGEN_RISCV64
+        case 0b00010:  // RO_FCVT_D_L
+          Format(instr, "fcvt.d.l 'fd, 'rs1");
+          break;
+        case 0b00011:  // RO_FCVT_D_LU
+          Format(instr, "fcvt.d.lu 'fd, 'rs1");
+          break;
+#endif /* JS_CODEGEN_RISCV64 */
+        default:
+          UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+#ifdef JS_CODEGEN_RISCV64
+    case RO_FMV_D_X: {
+      if (instr->Funct3Value() == 0b000 && instr->Rs2Value() == 0b00000) {
+        Format(instr, "fmv.d.x 'fd, 'rs1");
+      } else {
+        UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+#endif /* JS_CODEGEN_RISCV64 */
+    default: {
+      UNSUPPORTED_RISCV();
+    }
+  }
+}
+
+void Decoder::DecodeR4Type(Instruction* instr) {
+  // R4-type: the fused multiply-add family, F extension first, then the
+  // D extension equivalents.
+  const auto masked = instr->InstructionBits() & kR4TypeMask;
+  if (masked == RO_FMADD_S) {
+    Format(instr, "fmadd.s 'fd, 'fs1, 'fs2, 'fs3");
+  } else if (masked == RO_FMSUB_S) {
+    Format(instr, "fmsub.s 'fd, 'fs1, 'fs2, 'fs3");
+  } else if (masked == RO_FNMSUB_S) {
+    Format(instr, "fnmsub.s 'fd, 'fs1, 'fs2, 'fs3");
+  } else if (masked == RO_FNMADD_S) {
+    Format(instr, "fnmadd.s 'fd, 'fs1, 'fs2, 'fs3");
+  } else if (masked == RO_FMADD_D) {
+    Format(instr, "fmadd.d 'fd, 'fs1, 'fs2, 'fs3");
+  } else if (masked == RO_FMSUB_D) {
+    Format(instr, "fmsub.d 'fd, 'fs1, 'fs2, 'fs3");
+  } else if (masked == RO_FNMSUB_D) {
+    Format(instr, "fnmsub.d 'fd, 'fs1, 'fs2, 'fs3");
+  } else if (masked == RO_FNMADD_D) {
+    Format(instr, "fnmadd.d 'fd, 'fs1, 'fs2, 'fs3");
+  } else {
+    UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeIType(Instruction* instr) {
+  // Disassembles I-type instructions: jalr, loads, ALU-with-immediate,
+  // fence, system (ecall/ebreak), Zifencei/Zicsr and the F/D loads.
+  // Standard pseudo-instruction spellings are preferred whenever the
+  // operands match their canonical encodings.
+  switch (instr->InstructionBits() & kITypeMask) {
+    case RO_JALR:
+      if (instr->RdValue() == zero.code() && instr->Rs1Value() == ra.code() &&
+          instr->Imm12Value() == 0)
+        Format(instr, "ret");
+      else if (instr->RdValue() == zero.code() && instr->Imm12Value() == 0)
+        Format(instr, "jr 'rs1");
+      else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
+        Format(instr, "jalr 'rs1");
+      else
+        Format(instr, "jalr 'rd, 'imm12('rs1)");
+      break;
+    case RO_LB:
+      Format(instr, "lb 'rd, 'imm12('rs1)");
+      break;
+    case RO_LH:
+      Format(instr, "lh 'rd, 'imm12('rs1)");
+      break;
+    case RO_LW:
+      Format(instr, "lw 'rd, 'imm12('rs1)");
+      break;
+    case RO_LBU:
+      Format(instr, "lbu 'rd, 'imm12('rs1)");
+      break;
+    case RO_LHU:
+      Format(instr, "lhu 'rd, 'imm12('rs1)");
+      break;
+#ifdef JS_CODEGEN_RISCV64
+    case RO_LWU:
+      Format(instr, "lwu 'rd, 'imm12('rs1)");
+      break;
+    case RO_LD:
+      Format(instr, "ld 'rd, 'imm12('rs1)");
+      break;
+#endif /*JS_CODEGEN_RISCV64*/
+    case RO_ADDI:
+      if (instr->Imm12Value() == 0) {
+        // addi rd, rs1, 0 is the canonical encoding of mv (and of nop
+        // when rd == rs1 == x0).
+        if (instr->RdValue() == zero.code() && instr->Rs1Value() == zero.code())
+          Format(instr, "nop");
+        else
+          Format(instr, "mv 'rd, 'rs1");
+      } else if (instr->Rs1Value() == zero.code()) {
+        Format(instr, "li 'rd, 'imm12");
+      } else {
+        Format(instr, "addi 'rd, 'rs1, 'imm12");
+      }
+      break;
+    case RO_SLTI:
+      Format(instr, "slti 'rd, 'rs1, 'imm12");
+      break;
+    case RO_SLTIU:
+      // sltiu rd, rs1, 1 is the canonical encoding of seqz.
+      if (instr->Imm12Value() == 1)
+        Format(instr, "seqz 'rd, 'rs1");
+      else
+        Format(instr, "sltiu 'rd, 'rs1, 'imm12");
+      break;
+    case RO_XORI:
+      // xori rd, rs1, -1 is the canonical encoding of not.
+      if (instr->Imm12Value() == -1)
+        Format(instr, "not 'rd, 'rs1");
+      else
+        Format(instr, "xori 'rd, 'rs1, 'imm12x");
+      break;
+    case RO_ORI:
+      Format(instr, "ori 'rd, 'rs1, 'imm12x");
+      break;
+    case RO_ANDI:
+      Format(instr, "andi 'rd, 'rs1, 'imm12x");
+      break;
+    case RO_SLLI:
+      Format(instr, "slli 'rd, 'rs1, 's64");
+      break;
+    case RO_SRLI: {  // RO_SRAI
+      // srli/srai share the opcode; IsArithShift() selects srai.
+      if (!instr->IsArithShift()) {
+        Format(instr, "srli 'rd, 'rs1, 's64");
+      } else {
+        Format(instr, "srai 'rd, 'rs1, 's64");
+      }
+      break;
+    }
+#ifdef JS_CODEGEN_RISCV64
+    case RO_ADDIW:
+      // addiw rd, rs1, 0 is the canonical encoding of sext.w.
+      if (instr->Imm12Value() == 0)
+        Format(instr, "sext.w 'rd, 'rs1");
+      else
+        Format(instr, "addiw 'rd, 'rs1, 'imm12");
+      break;
+    case RO_SLLIW:
+      Format(instr, "slliw 'rd, 'rs1, 's32");
+      break;
+    case RO_SRLIW: {  // RO_SRAIW
+      if (!instr->IsArithShift()) {
+        Format(instr, "srliw 'rd, 'rs1, 's32");
+      } else {
+        Format(instr, "sraiw 'rd, 'rs1, 's32");
+      }
+      break;
+    }
+#endif /*JS_CODEGEN_RISCV64*/
+    case RO_FENCE:
+      // A full fence (pred == succ == iorw) is printed without operands.
+      if (instr->MemoryOrder(true) == PSIORW &&
+          instr->MemoryOrder(false) == PSIORW)
+        Format(instr, "fence");
+      else
+        Format(instr, "fence 'pre, 'suc");
+      break;
+    case RO_ECALL: {  // RO_EBREAK
+      // ecall and ebreak share an opcode; imm12 distinguishes them.
+      if (instr->Imm12Value() == 0) {  // ECALL
+        Format(instr, "ecall");
+      } else if (instr->Imm12Value() == 1) {  // EBREAK
+        Format(instr, "ebreak");
+      } else {
+        UNSUPPORTED_RISCV();
+      }
+      break;
+    }
+    // TODO(riscv): use Zifencei Standard Extension macro block
+    case RO_FENCE_I:
+      Format(instr, "fence.i");
+      break;
+    // TODO(riscv): use Zicsr Standard Extension macro block
+    // FIXME(RISC-V): Add special formatting for CSR registers
+    case RO_CSRRW:
+      // Writes to the FP CSRs have dedicated pseudo-instructions.
+      if (instr->CsrValue() == csr_fcsr) {
+        if (instr->RdValue() == zero.code())
+          Format(instr, "fscsr 'rs1");
+        else
+          Format(instr, "fscsr 'rd, 'rs1");
+      } else if (instr->CsrValue() == csr_frm) {
+        if (instr->RdValue() == zero.code())
+          Format(instr, "fsrm 'rs1");
+        else
+          Format(instr, "fsrm 'rd, 'rs1");
+      } else if (instr->CsrValue() == csr_fflags) {
+        if (instr->RdValue() == zero.code())
+          Format(instr, "fsflags 'rs1");
+        else
+          Format(instr, "fsflags 'rd, 'rs1");
+      } else if (instr->RdValue() == zero.code()) {
+        Format(instr, "csrw 'csr, 'rs1");
+      } else {
+        Format(instr, "csrrw 'rd, 'csr, 'rs1");
+      }
+      break;
+    case RO_CSRRS:
+      if (instr->Rs1Value() == zero.code()) {
+        // csrrs rd, csr, x0 is a plain CSR read; the counter and FP CSRs
+        // have dedicated read pseudo-instructions.
+        switch (instr->CsrValue()) {
+          case csr_instret:
+            Format(instr, "rdinstret 'rd");
+            break;
+          case csr_instreth:
+            Format(instr, "rdinstreth 'rd");
+            break;
+          case csr_time:
+            Format(instr, "rdtime 'rd");
+            break;
+          case csr_timeh:
+            Format(instr, "rdtimeh 'rd");
+            break;
+          case csr_cycle:
+            Format(instr, "rdcycle 'rd");
+            break;
+          case csr_cycleh:
+            Format(instr, "rdcycleh 'rd");
+            break;
+          case csr_fflags:
+            Format(instr, "frflags 'rd");
+            break;
+          case csr_frm:
+            Format(instr, "frrm 'rd");
+            break;
+          case csr_fcsr:
+            Format(instr, "frcsr 'rd");
+            break;
+          default:
+            // Bug fix: any other CSR is still a valid generic read. The
+            // previous code crashed here (MOZ_CRASH) and guarded the
+            // "csrr" branch with a duplicated, unreachable
+            // Rs1Value() == zero test.
+            Format(instr, "csrr 'rd, 'csr");
+            break;
+        }
+      } else if (instr->RdValue() == zero.code()) {
+        Format(instr, "csrs 'csr, 'rs1");
+      } else {
+        Format(instr, "csrrs 'rd, 'csr, 'rs1");
+      }
+      break;
+    case RO_CSRRC:
+      if (instr->RdValue() == zero.code())
+        Format(instr, "csrc 'csr, 'rs1");
+      else
+        Format(instr, "csrrc 'rd, 'csr, 'rs1");
+      break;
+    case RO_CSRRWI:
+      if (instr->RdValue() == zero.code())
+        Format(instr, "csrwi 'csr, 'uimm");
+      else
+        Format(instr, "csrrwi 'rd, 'csr, 'uimm");
+      break;
+    case RO_CSRRSI:
+      if (instr->RdValue() == zero.code())
+        Format(instr, "csrsi 'csr, 'uimm");
+      else
+        Format(instr, "csrrsi 'rd, 'csr, 'uimm");
+      break;
+    case RO_CSRRCI:
+      if (instr->RdValue() == zero.code())
+        Format(instr, "csrci 'csr, 'uimm");
+      else
+        Format(instr, "csrrci 'rd, 'csr, 'uimm");
+      break;
+    // TODO(riscv): use F Extension macro block
+    case RO_FLW:
+      Format(instr, "flw 'fd, 'imm12('rs1)");
+      break;
+    // TODO(riscv): use D Extension macro block
+    case RO_FLD:
+      Format(instr, "fld 'fd, 'imm12('rs1)");
+      break;
+    default:
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+      // Unmatched I-type encodings may be RVV vector loads.
+      if (instr->vl_vs_width() != -1) {
+        DecodeRvvVL(instr);
+      } else {
+        UNSUPPORTED_RISCV();
+      }
+      break;
+#else
+      UNSUPPORTED_RISCV();
+#endif
+  }
+}
+
+void Decoder::DecodeSType(Instruction* instr) {
+  // S-type stores (base register + S-immediate): integer stores first,
+  // then the F/D extension stores. Anything else may be an RVV vector
+  // store when that extension is compiled in.
+  const auto masked = instr->InstructionBits() & kSTypeMask;
+  if (masked == RO_SB) {
+    Format(instr, "sb 'rs2, 'offS('rs1)");
+  } else if (masked == RO_SH) {
+    Format(instr, "sh 'rs2, 'offS('rs1)");
+  } else if (masked == RO_SW) {
+    Format(instr, "sw 'rs2, 'offS('rs1)");
+#ifdef JS_CODEGEN_RISCV64
+  } else if (masked == RO_SD) {
+    Format(instr, "sd 'rs2, 'offS('rs1)");
+#endif /*JS_CODEGEN_RISCV64*/
+  } else if (masked == RO_FSW) {
+    Format(instr, "fsw 'fs2, 'offS('rs1)");
+  } else if (masked == RO_FSD) {
+    Format(instr, "fsd 'fs2, 'offS('rs1)");
+  } else {
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+    if (instr->vl_vs_width() != -1) {
+      DecodeRvvVS(instr);
+    } else {
+      UNSUPPORTED_RISCV();
+    }
+#else
+    UNSUPPORTED_RISCV();
+#endif
+  }
+}
+
+void Decoder::DecodeBType(Instruction* instr) {
+  // B-type conditional branches; the comparison kind is part of
+  // kBTypeMask.
+  const auto masked = instr->InstructionBits() & kBTypeMask;
+  if (masked == RO_BEQ) {
+    Format(instr, "beq 'rs1, 'rs2, 'offB");
+  } else if (masked == RO_BNE) {
+    Format(instr, "bne 'rs1, 'rs2, 'offB");
+  } else if (masked == RO_BLT) {
+    Format(instr, "blt 'rs1, 'rs2, 'offB");
+  } else if (masked == RO_BGE) {
+    Format(instr, "bge 'rs1, 'rs2, 'offB");
+  } else if (masked == RO_BLTU) {
+    Format(instr, "bltu 'rs1, 'rs2, 'offB");
+  } else if (masked == RO_BGEU) {
+    Format(instr, "bgeu 'rs1, 'rs2, 'offB");
+  } else {
+    UNSUPPORTED_RISCV();
+  }
+}
+void Decoder::DecodeUType(Instruction* instr) {
+  // U-type has no additional opcode mask: only LUI and AUIPC exist.
+  const auto opcode = instr->BaseOpcodeFieldRaw();
+  if (opcode == LUI) {
+    Format(instr, "lui 'rd, 'imm20U");
+  } else if (opcode == AUIPC) {
+    Format(instr, "auipc 'rd, 'imm20U");
+  } else {
+    UNSUPPORTED_RISCV();
+  }
+}
+void Decoder::DecodeJType(Instruction* instr) {
+  // J-type has no additional opcode mask: JAL only. Prefer the standard
+  // pseudo-instructions: "j" when the link register is x0 (link value
+  // discarded) and plain "jal" when it is ra.
+  switch (instr->BaseOpcodeValue()) {
+    case JAL:
+      if (instr->RdValue() == zero.code())
+        Format(instr, "j 'imm20J");
+      else if (instr->RdValue() == ra.code())
+        Format(instr, "jal 'imm20J");
+      else
+        Format(instr, "jal 'rd, 'imm20J");
+      break;
+    default:
+      UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCRType(Instruction* instr) {
+  // CR format (c.jr/c.mv/c.ebreak/c.jalr/c.add), distinguished by funct4
+  // and by whether the rd/rs1 and rs2 fields are zero.
+  switch (instr->RvcFunct4Value()) {
+    case 0b1000:
+      // c.jr (rs2 == 0) or c.mv (rs2 != 0); printed in expanded form.
+      if (instr->RvcRs1Value() != 0 && instr->RvcRs2Value() == 0)
+        Format(instr, "jr 'Crs1");
+      else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+        Format(instr, "mv 'Crd, 'Crs2");
+      else
+        UNSUPPORTED_RISCV();
+      break;
+    case 0b1001:
+      // c.ebreak (rd == rs2 == 0), c.jalr (rs2 == 0) or c.add.
+      if (instr->RvcRs1Value() == 0 && instr->RvcRs2Value() == 0)
+        Format(instr, "ebreak");
+      else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() == 0)
+        Format(instr, "jalr 'Crs1");
+      else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+        Format(instr, "add 'Crd, 'Crd, 'Crs2");
+      else
+        UNSUPPORTED_RISCV();
+      break;
+    default:
+      UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCAType(Instruction* instr) {
+  // CA format: compressed register-register arithmetic, printed in
+  // expanded (non-compressed) form.
+  const auto masked = instr->InstructionBits() & kCATypeMask;
+  if (masked == RO_C_SUB) {
+    Format(instr, "sub 'Crs1s, 'Crs1s, 'Crs2s");
+  } else if (masked == RO_C_XOR) {
+    Format(instr, "xor 'Crs1s, 'Crs1s, 'Crs2s");
+  } else if (masked == RO_C_OR) {
+    Format(instr, "or 'Crs1s, 'Crs1s, 'Crs2s");
+  } else if (masked == RO_C_AND) {
+    Format(instr, "and 'Crs1s, 'Crs1s, 'Crs2s");
+#ifdef JS_CODEGEN_RISCV64
+  } else if (masked == RO_C_SUBW) {
+    Format(instr, "subw 'Crs1s, 'Crs1s, 'Crs2s");
+  } else if (masked == RO_C_ADDW) {
+    Format(instr, "addw 'Crs1s, 'Crs1s, 'Crs2s");
+#endif
+  } else {
+    UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCIType(Instruction* instr) {
+  // CI format: immediates and stack-pointer-relative loads, printed in
+  // expanded (non-compressed) form.
+  switch (instr->RvcOpcode()) {
+    case RO_C_NOP_ADDI:
+      // c.addi with rd == x0 is the canonical NOP.
+      if (instr->RvcRdValue() == 0)
+        Format(instr, "nop");
+      else
+        Format(instr, "addi 'Crd, 'Crd, 'Cimm6");
+      break;
+#ifdef JS_CODEGEN_RISCV64
+    case RO_C_ADDIW:
+      Format(instr, "addiw 'Crd, 'Crd, 'Cimm6");
+      break;
+#endif
+    case RO_C_LI:
+      Format(instr, "li 'Crd, 'Cimm6");
+      break;
+    case RO_C_LUI_ADD:
+      // rd == x2 encodes c.addi16sp; any other non-zero rd is c.lui.
+      if (instr->RvcRdValue() == 2)
+        Format(instr, "addi sp, sp, 'Cimm6Addi16sp");
+      else if (instr->RvcRdValue() != 0 && instr->RvcRdValue() != 2)
+        Format(instr, "lui 'Crd, 'Cimm6U");
+      else
+        UNSUPPORTED_RISCV();
+      break;
+    case RO_C_SLLI:
+      Format(instr, "slli 'Crd, 'Crd, 'Cshamt");
+      break;
+    case RO_C_FLDSP:
+      Format(instr, "fld 'Cfd, 'Cimm6Ldsp(sp)");
+      break;
+    case RO_C_LWSP:
+      Format(instr, "lw 'Crd, 'Cimm6Lwsp(sp)");
+      break;
+#ifdef JS_CODEGEN_RISCV64
+    case RO_C_LDSP:
+      Format(instr, "ld 'Crd, 'Cimm6Ldsp(sp)");
+      break;
+#elif defined(JS_CODEGEN_RISCV32)
+    case RO_C_FLWSP:
+      // NOTE(review): uses the doubleword-scaled 'Cimm6Ldsp specifier,
+      // but c.flwsp offsets are word-scaled -- confirm the specifier.
+      Format(instr, "flw 'Cfd, 'Cimm6Ldsp(sp)");
+      break;
+#endif
+    default:
+      UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCIWType(Instruction* instr) {
+  // CIW format: the only instruction is c.addi4spn, printed in its
+  // expanded "addi rd', sp, imm" form.
+  if (instr->RvcOpcode() == RO_C_ADDI4SPN) {
+    Format(instr, "addi 'Crs2s, sp, 'Cimm8Addi4spn");
+  } else {
+    UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCSSType(Instruction* instr) {
+  // CSS format: stack-pointer-relative stores, printed in expanded form.
+  switch (instr->RvcOpcode()) {
+    case RO_C_SWSP:
+      Format(instr, "sw 'Crs2, 'Cimm6Swsp(sp)");
+      break;
+#ifdef JS_CODEGEN_RISCV64
+    case RO_C_SDSP:
+      Format(instr, "sd 'Crs2, 'Cimm6Sdsp(sp)");
+      break;
+#elif defined(JS_CODEGEN_RISCV32)
+    case RO_C_FSWSP:
+      // NOTE(review): uses the doubleword-scaled 'Cimm6Sdsp specifier,
+      // but c.fswsp offsets are word-scaled -- confirm the specifier.
+      Format(instr, "fsw 'Cfs2, 'Cimm6Sdsp(sp)");
+      break;
+#endif
+    case RO_C_FSDSP:
+      Format(instr, "fsd 'Cfs2, 'Cimm6Sdsp(sp)");
+      break;
+    default:
+      UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCLType(Instruction* instr) {
+  // CL format: compressed register-relative loads, printed in expanded
+  // (non-compressed) form.
+  switch (instr->RvcOpcode()) {
+    case RO_C_FLD:
+      Format(instr, "fld 'Cfs2s, 'Cimm5D('Crs1s)");
+      break;
+    case RO_C_LW:
+      Format(instr, "lw 'Crs2s, 'Cimm5W('Crs1s)");
+      break;
+#ifdef JS_CODEGEN_RISCV64
+    case RO_C_LD:
+      Format(instr, "ld 'Crs2s, 'Cimm5D('Crs1s)");
+      break;
+#elif defined(JS_CODEGEN_RISCV32)
+    case RO_C_FLW:
+      // Bug fix: c.flw expands to "flw", not "fld" (copy/paste error).
+      // NOTE(review): 'Cimm5D is doubleword-scaled while c.flw offsets
+      // are word-scaled ('Cimm5W) -- confirm the immediate specifier.
+      Format(instr, "flw 'Cfs2s, 'Cimm5D('Crs1s)");
+      break;
+#endif
+    default:
+      UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCSType(Instruction* instr) {
+  // CS format: compressed register-relative stores, printed in expanded
+  // (non-compressed) form.
+  switch (instr->RvcOpcode()) {
+    case RO_C_FSD:
+      Format(instr, "fsd 'Cfs2s, 'Cimm5D('Crs1s)");
+      break;
+    case RO_C_SW:
+      Format(instr, "sw 'Crs2s, 'Cimm5W('Crs1s)");
+      break;
+#ifdef JS_CODEGEN_RISCV64
+    case RO_C_SD:
+      Format(instr, "sd 'Crs2s, 'Cimm5D('Crs1s)");
+      break;
+#elif defined(JS_CODEGEN_RISCV32)
+    case RO_C_FSW:
+      // NOTE(review): 'Cimm5D is doubleword-scaled while c.fsw offsets
+      // are word-scaled -- confirm the immediate specifier.
+      Format(instr, "fsw 'Cfs2s, 'Cimm5D('Crs1s)");
+      break;
+#endif
+    default:
+      UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCJType(Instruction* instr) {
+  // CJ format: only c.j is handled here; it is printed as the "j"
+  // pseudo-instruction.
+  if (instr->RvcOpcode() == RO_C_J) {
+    Format(instr, "j 'Cimm11CJ");
+  } else {
+    UNSUPPORTED_RISCV();
+  }
+}
+
+void Decoder::DecodeCBType(Instruction* instr) {
+  // CB format: compressed branches and the shift/andi group, printed in
+  // expanded form.
+  switch (instr->RvcOpcode()) {
+    case RO_C_BNEZ:
+      // Bug fix: bnez/beqz are two-operand pseudo-instructions
+      // (register, target); the old format strings printed a spurious
+      // ", x0" operand.
+      Format(instr, "bnez 'Crs1s, 'Cimm8B");
+      break;
+    case RO_C_BEQZ:
+      Format(instr, "beqz 'Crs1s, 'Cimm8B");
+      break;
+    case RO_C_MISC_ALU:
+      // funct2 selects srli / srai / andi.
+      if (instr->RvcFunct2BValue() == 0b00)
+        Format(instr, "srli 'Crs1s, 'Crs1s, 'Cshamt");
+      else if (instr->RvcFunct2BValue() == 0b01)
+        Format(instr, "srai 'Crs1s, 'Crs1s, 'Cshamt");
+      else if (instr->RvcFunct2BValue() == 0b10)
+        Format(instr, "andi 'Crs1s, 'Crs1s, 'Cimm6");
+      else
+        UNSUPPORTED_RISCV();
+      break;
+    default:
+      UNSUPPORTED_RISCV();
+  }
+}
+
+#undef VERIFIY
+
+bool Decoder::IsConstantPoolAt(uint8_t* instr_ptr) {
+  // The RISC-V port does not emit constant pools; this predicate must
+  // never be reached.
+  UNSUPPORTED_RISCV();
+  MOZ_CRASH();
+}
+
+int Decoder::ConstantPoolSizeAt(uint8_t* instr_ptr) {
+  // The RISC-V port does not emit constant pools; this must never be
+  // reached.
+  UNSUPPORTED_RISCV();
+  MOZ_CRASH();
+}
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+// Returns the number of bytes consumed (the instruction's size).
+int Decoder::InstructionDecode(byte* instr_ptr) {
+  Instruction* instr = Instruction::At(instr_ptr);
+  // Print raw instruction bytes first.
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+                              instr->InstructionBits());
+  // Dispatch on the decoded instruction format; the kC*Type cases are
+  // the compressed (RVC) formats.
+  switch (instr->InstructionType()) {
+    case Instruction::kRType:
+      DecodeRType(instr);
+      break;
+    case Instruction::kR4Type:
+      DecodeR4Type(instr);
+      break;
+    case Instruction::kIType:
+      DecodeIType(instr);
+      break;
+    case Instruction::kSType:
+      DecodeSType(instr);
+      break;
+    case Instruction::kBType:
+      DecodeBType(instr);
+      break;
+    case Instruction::kUType:
+      DecodeUType(instr);
+      break;
+    case Instruction::kJType:
+      DecodeJType(instr);
+      break;
+    case Instruction::kCRType:
+      DecodeCRType(instr);
+      break;
+    case Instruction::kCAType:
+      DecodeCAType(instr);
+      break;
+    case Instruction::kCJType:
+      DecodeCJType(instr);
+      break;
+    case Instruction::kCIType:
+      DecodeCIType(instr);
+      break;
+    case Instruction::kCIWType:
+      DecodeCIWType(instr);
+      break;
+    case Instruction::kCSSType:
+      DecodeCSSType(instr);
+      break;
+    case Instruction::kCLType:
+      DecodeCLType(instr);
+      break;
+    case Instruction::kCSType:
+      DecodeCSType(instr);
+      break;
+    case Instruction::kCBType:
+      DecodeCBType(instr);
+      break;
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+    case Instruction::kVType:
+      DecodeVType(instr);
+      break;
+#endif
+    default:
+      Format(instr, "UNSUPPORTED");
+      UNSUPPORTED_RISCV();
+  }
+  return instr->InstructionSize();
+}
+
+} // namespace disasm
+
+#undef STRING_STARTS_WITH
+#undef VERIFY
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(uint8_t* addr) const {
+  // Render the raw address into the shared scratch buffer. The "%p"
+  // conversion requires a void* argument; passing a uint8_t* directly
+  // is undefined behavior in printf-family functions.
+  SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
+  return tmp_buffer_.start();
+}
+
+const char* NameConverter::NameOfConstant(uint8_t* addr) const {
+  // Constants are rendered the same way as plain addresses.
+  return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  // Map an integer register code to its canonical RISC-V name.
+  return Registers::GetName(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  // Byte registers only exist on x86; this must never be called here.
+  MOZ_CRASH(" RISC-V does not have the concept of a byte register.");
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  // "XMM" is the converter's generic name for FP/SIMD registers; on
+  // RISC-V these map to the FP register file.
+  return FloatRegisters::GetName(reg);
+}
+
+const char* NameConverter::NameInCode(uint8_t* addr) const {
+  // The default name converter is called for unknown code, so do not
+  // try to access any memory; simply return an empty name.
+  return "";
+}
+
+//------------------------------------------------------------------------------
+
+// The NameConverter is held by reference and must outlive this
+// Disassembler; ownership stays with the caller (see header).
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+Disassembler::~Disassembler() {}
+
+// Disassemble a single instruction into `buffer`; returns the
+// instruction's size in bytes.
+int Disassembler::InstructionDecode(V8Vector<char> buffer,
+                                    uint8_t* instruction) {
+  Decoder d(converter_, buffer);
+  return d.InstructionDecode(instruction);
+}
+
+int Disassembler::ConstantPoolSizeAt(uint8_t* instruction) {
+  // Delegates to the decoder, which does not support constant pools on
+  // RISC-V (it crashes if ever reached).
+  return Decoder::ConstantPoolSizeAt(instruction);
+}
+
+void Disassembler::Disassemble(FILE* f, uint8_t* begin, uint8_t* end) {
+  // Disassemble every instruction in [begin, end) to `f`, one per line:
+  // address, raw instruction word, then the decoded text.
+  NameConverter converter;
+  Disassembler d(converter);
+  for (uint8_t* pc = begin; pc < end;) {
+    EmbeddedVector<char, ReasonableBufferSize> buffer;
+    buffer[0] = '\0';
+    uint8_t* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    // Bug fix: "%p" requires a void* argument; passing uint8_t* directly
+    // is undefined behavior in printf-family functions.
+    fprintf(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+            *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+  }
+}
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/disasm/Disasm-riscv64.h b/js/src/jit/riscv64/disasm/Disasm-riscv64.h
new file mode 100644
index 0000000000..0548523f6b
--- /dev/null
+++ b/js/src/jit/riscv64/disasm/Disasm-riscv64.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ */
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_disasm_Disasm_riscv64_h
+#define jit_riscv64_disasm_Disasm_riscv64_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include <stdio.h>
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/constant/util-riscv64.h"
+namespace js {
+namespace jit {
+namespace disasm {
+
+typedef unsigned char byte;
+
+// Interface and default implementation for converting addresses and
+// register-numbers to text. The default implementation is machine
+// specific.
+class NameConverter {
+ public:
+  virtual ~NameConverter() {}
+  virtual const char* NameOfCPURegister(int reg) const;
+  virtual const char* NameOfByteCPURegister(int reg) const;
+  virtual const char* NameOfXMMRegister(int reg) const;
+  virtual const char* NameOfAddress(byte* addr) const;
+  virtual const char* NameOfConstant(byte* addr) const;
+  virtual const char* NameInCode(byte* addr) const;
+
+ protected:
+  // Scratch storage backing the pointers returned by NameOfAddress() /
+  // NameOfConstant(); a subsequent call invalidates the previous result.
+  EmbeddedVector<char, 128> tmp_buffer_;
+};
+
+// A generic Disassembler interface
+class Disassembler {
+ public:
+  // Caller deallocates converter; it is held by reference and must
+  // outlive this Disassembler.
+  explicit Disassembler(const NameConverter& converter);
+
+  virtual ~Disassembler();
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(V8Vector<char> buffer, uint8_t* instruction);
+
+  // Returns -1 if instruction does not mark the beginning of a constant pool,
+  // or the number of entries in the constant pool beginning here.
+  int ConstantPoolSizeAt(byte* instruction);
+
+  // Write disassembly into specified file 'f' using specified NameConverter
+  // (see constructor).
+  static void Disassemble(FILE* f, uint8_t* begin, uint8_t* end);
+
+ private:
+  const NameConverter& converter_;
+
+  // Disallow implicit constructors.
+  Disassembler() = delete;
+  Disassembler(const Disassembler&) = delete;
+  void operator=(const Disassembler&) = delete;
+};
+
+} // namespace disasm
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_disasm_Disasm_riscv64_h
diff --git a/js/src/jit/riscv64/extension/base-assembler-riscv.cc b/js/src/jit/riscv64/extension/base-assembler-riscv.cc
new file mode 100644
index 0000000000..a64cc818b3
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-assembler-riscv.cc
@@ -0,0 +1,517 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+
+namespace js {
+namespace jit {
+
+int ToNumber(Register reg) {
+ MOZ_ASSERT(reg.code() < Registers::Total && reg.code() >= 0);
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // ra
+ 2, // sp
+ 3, // gp
+ 4, // tp
+ 5, // t0
+ 6, // t1
+ 7, // t2
+ 8, // s0/fp
+ 9, // s1
+ 10, // a0
+ 11, // a1
+ 12, // a2
+ 13, // a3
+ 14, // a4
+ 15, // a5
+ 16, // a6
+ 17, // a7
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // s8
+ 25, // s9
+ 26, // s10
+ 27, // s11
+ 28, // t3
+ 29, // t4
+ 30, // t5
+ 31, // t6
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(uint32_t num) {
+ MOZ_ASSERT(num >= 0 && num < Registers::Total);
+ const Register kRegisters[] = {
+ zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5,
+ a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6};
+ return kRegisters[num];
+}
+
+// ----- Top-level instruction formats match those in the ISA manual
+// (R, I, S, B, U, J). These match the formats in LLVM's RISCVInstrFormats.td.
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd, Register rs1,
+ Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ FPURegister rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ Register rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ FPURegister rs1, FPURegister rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
+ Register rd, Register rs1, Register rs2,
+ Register rs3, FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint2(funct2) && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1,
+ FPURegister rs2, FPURegister rs3,
+ FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint2(funct2) && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrRAtomic(uint8_t funct5, bool aq, bool rl,
+ uint8_t funct3, Register rd,
+ Register rs1, Register rs2) {
+ MOZ_ASSERT(is_uint5(funct5) && is_uint3(funct3));
+ Instr instr = AMO | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (rl << kRlShift) | (aq << kAqShift) | (funct5 << kFunct5Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrRFrm(uint8_t funct7, BaseOpcode opcode,
+ Register rd, Register rs1, Register rs2,
+ FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1,
+ int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrIShift(bool arithshift, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ Register rs1, uint8_t shamt) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrIShiftW(bool arithshift, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ Register rs1, uint8_t shamt) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtWShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, Register rs2, int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, FPURegister rs2,
+ int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrB(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, Register rs2, int16_t imm13) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int13(imm13) && ((imm13 & 1) == 0));
+ Instr instr = opcode | ((imm13 & 0x800) >> 4) | // bit 11
+ ((imm13 & 0x1e) << 7) | // bits 4-1
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm13 & 0x7e0) << 20) | // bits 10-5
+ ((imm13 & 0x1000) << 19); // bit 12
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrU(BaseOpcode opcode, Register rd,
+ int32_t imm20) {
+ MOZ_ASSERT((is_int20(imm20) || is_uint20(imm20)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (imm20 << kImm20Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrJ(BaseOpcode opcode, Register rd,
+ int32_t imm21) {
+ MOZ_ASSERT(is_int21(imm21) && ((imm21 & 1) == 0));
+ Instr instr = opcode | (rd.code() << kRdShift) |
+ (imm21 & 0xff000) | // bits 19-12
+ ((imm21 & 0x800) << 9) | // bit 11
+ ((imm21 & 0x7fe) << 20) | // bits 10-1
+ ((imm21 & 0x100000) << 11); // bit 20
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCR(uint8_t funct4, BaseOpcode opcode,
+ Register rd, Register rs2) {
+ MOZ_ASSERT(is_uint4(funct4));
+ ShortInstr instr = opcode | (rs2.code() << kRvcRs2Shift) |
+ (rd.code() << kRvcRdShift) | (funct4 << kRvcFunct4Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCA(uint8_t funct6, BaseOpcode opcode,
+ Register rd, uint8_t funct, Register rs2) {
+ MOZ_ASSERT(is_uint6(funct6) && is_uint2(funct));
+ ShortInstr instr = opcode | ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((rd.code() & 0x7) << kRvcRs1sShift) |
+ (funct6 << kRvcFunct6Shift) | (funct << kRvcFunct2Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCI(uint8_t funct3, BaseOpcode opcode,
+ Register rd, int8_t imm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int6(imm6));
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((imm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
+ Register rd, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIW(uint8_t funct3, BaseOpcode opcode,
+ Register rd, uint8_t uimm8) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
+ Register rs2, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rs2, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1,
+ uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+void AssemblerRiscvBase::GenInstrCJ(uint8_t funct3, BaseOpcode opcode,
+ uint16_t uint11) {
+ MOZ_ASSERT(is_uint11(uint11));
+ ShortInstr instr = opcode | (funct3 << kRvcFunct3Shift) | (uint11 << 2);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
+ Register rs2, Register rs1, uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rs2, Register rs1,
+ uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCB(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, uint8_t uimm8) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8 & 0x1f) << 2) | ((uimm8 & 0xe0) << 5) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCBA(uint8_t funct3, uint8_t funct2,
+ BaseOpcode opcode, Register rs1,
+ int8_t imm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint2(funct2) && is_int6(imm6));
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift) | (funct2 << 10);
+ emit(instr);
+}
+// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
+
+void AssemblerRiscvBase::GenInstrBranchCC_rri(uint8_t funct3, Register rs1,
+ Register rs2, int16_t imm13) {
+ GenInstrB(funct3, BRANCH, rs1, rs2, imm13);
+}
+
+void AssemblerRiscvBase::GenInstrLoad_ri(uint8_t funct3, Register rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, LOAD, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrStore_rri(uint8_t funct3, Register rs1,
+ Register rs2, int16_t imm12) {
+ GenInstrS(funct3, STORE, rs1, rs2, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrALU_ri(uint8_t funct3, Register rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, OP_IMM, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrShift_ri(bool arithshift, uint8_t funct3,
+ Register rd, Register rs1,
+ uint8_t shamt) {
+ MOZ_ASSERT(is_uint6(shamt));
+ GenInstrI(funct3, OP_IMM, rd, rs1, (arithshift << 10) | shamt);
+}
+
+void AssemblerRiscvBase::GenInstrALU_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrCSR_ir(uint8_t funct3, Register rd,
+ ControlStatusReg csr, Register rs1) {
+ GenInstrI(funct3, SYSTEM, rd, rs1, csr);
+}
+
+void AssemblerRiscvBase::GenInstrCSR_ii(uint8_t funct3, Register rd,
+ ControlStatusReg csr, uint8_t imm5) {
+ GenInstrI(funct3, SYSTEM, rd, ToRegister(imm5), csr);
+}
+
+void AssemblerRiscvBase::GenInstrShiftW_ri(bool arithshift, uint8_t funct3,
+ Register rd, Register rs1,
+ uint8_t shamt) {
+ GenInstrIShiftW(arithshift, funct3, OP_IMM_32, rd, rs1, shamt);
+}
+
+void AssemblerRiscvBase::GenInstrALUW_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_32, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrPriv(uint8_t funct7, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, 0b000, SYSTEM, ToRegister(0UL), rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, LOAD_FP, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrStoreFP_rri(uint8_t funct3, Register rs1,
+ FPURegister rs2, int16_t imm12) {
+ GenInstrS(funct3, STORE_FP, rs1, rs2, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, FPURegister rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, FPURegister rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/base-assembler-riscv.h b/js/src/jit/riscv64/extension/base-assembler-riscv.h
new file mode 100644
index 0000000000..cb3083d365
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-assembler-riscv.h
@@ -0,0 +1,219 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#ifndef jit_riscv64_extension_Base_assembler_riscv_h
+#define jit_riscv64_extension_Base_assembler_riscv_h
+
+#include <memory>
+#include <set>
+#include <stdio.h>
+
+#include "jit/Label.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Register-riscv64.h"
+
+#define xlen (uint8_t(sizeof(void*) * 8))
+
+#define kBitsPerByte 8UL
+// Check number width.
+inline constexpr bool is_intn(int64_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline constexpr bool is_uintn(int64_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+#undef kBitsPerByte
+// clang-format off
+#define INT_1_TO_63_LIST(V) \
+ V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) V(9) V(10) \
+ V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) V(20) \
+ V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) V(30) \
+ V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+ V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) V(50) \
+ V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
+ V(61) V(62) V(63)
+// clang-format on
+
+#define DECLARE_IS_INT_N(N) \
+ inline constexpr bool is_int##N(int64_t x) { return is_intn(x, N); }
+
+#define DECLARE_IS_UINT_N(N) \
+ template <class T> \
+ inline constexpr bool is_uint##N(T x) { \
+ return is_uintn(x, N); \
+ }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+
+#undef DECLARE_IS_INT_N
+#undef INT_1_TO_63_LIST
+
+namespace js {
+namespace jit {
+
+typedef FloatRegister FPURegister;
+#define zero_reg zero
+
+#define DEBUG_PRINTF(...) \
+ if (FLAG_riscv_debug) { \
+ std::printf(__VA_ARGS__); \
+ }
+
+int ToNumber(Register reg);
+Register ToRegister(uint32_t num);
+
+class AssemblerRiscvBase {
+ protected:
+ virtual int32_t branch_offset_helper(Label* L, OffsetSize bits) = 0;
+
+ virtual void emit(Instr x) = 0;
+ virtual void emit(ShortInstr x) = 0;
+ virtual void emit(uint64_t x) = 0;
+ virtual uint32_t currentOffset() = 0;
+ // Instruction generation.
+
+ // ----- Top-level instruction formats match those in the ISA manual
+ // (R, I, S, B, U, J). These match the formats defined in LLVM's
+ // RISCVInstrFormats.td.
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrR4(uint8_t funct2, BaseOpcode opcode, Register rd, Register rs1,
+ Register rs2, Register rs3, FPURoundingMode frm);
+ void GenInstrR4(uint8_t funct2, BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2, FPURegister rs3,
+ FPURoundingMode frm);
+ void GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, uint8_t funct3,
+ Register rd, Register rs1, Register rs2);
+ void GenInstrRFrm(uint8_t funct7, BaseOpcode opcode, Register rd,
+ Register rs1, Register rs2, FPURoundingMode frm);
+ void GenInstrI(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrI(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ Register rs1, int16_t imm12);
+ void GenInstrIShift(bool arithshift, uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrIShiftW(bool arithshift, uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1,
+ FPURegister rs2, int16_t imm12);
+ void GenInstrB(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrU(BaseOpcode opcode, Register rd, int32_t imm20);
+ void GenInstrJ(BaseOpcode opcode, Register rd, int32_t imm20);
+ void GenInstrCR(uint8_t funct4, BaseOpcode opcode, Register rd, Register rs2);
+ void GenInstrCA(uint8_t funct6, BaseOpcode opcode, Register rd, uint8_t funct,
+ Register rs2);
+ void GenInstrCI(uint8_t funct3, BaseOpcode opcode, Register rd, int8_t imm6);
+ void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, Register rd,
+ uint8_t uimm6);
+ void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ uint8_t uimm6);
+ void GenInstrCIW(uint8_t funct3, BaseOpcode opcode, Register rd,
+ uint8_t uimm8);
+ void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
+ uint8_t uimm6);
+ void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, Register rs2,
+ uint8_t uimm6);
+ void GenInstrCL(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCL(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ Register rs1, uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, BaseOpcode opcode, Register rs2, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
+ Register rs1, uint8_t uimm5);
+ void GenInstrCJ(uint8_t funct3, BaseOpcode opcode, uint16_t uint11);
+ void GenInstrCB(uint8_t funct3, BaseOpcode opcode, Register rs1,
+ uint8_t uimm8);
+ void GenInstrCBA(uint8_t funct3, uint8_t funct2, BaseOpcode opcode,
+ Register rs1, int8_t imm6);
+
+ // ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
+ void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, int16_t imm12);
+ void GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, Register rs1,
+ Register rs2);
+ void GenInstrCSR_ir(uint8_t funct3, Register rd, ControlStatusReg csr,
+ Register rs1);
+ void GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr,
+ uint8_t rs1);
+ void GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrPriv(uint8_t funct7, Register rs1, Register rs2);
+ void GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStoreFP_rri(uint8_t funct3, Register rs1, FPURegister rs2,
+ int16_t imm12);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ Register rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, FPURegister rs2);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_extension_Base_assembler_riscv_h
diff --git a/js/src/jit/riscv64/extension/base-riscv-i.cc b/js/src/jit/riscv64/extension/base-riscv-i.cc
new file mode 100644
index 0000000000..2ee8877eb1
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-riscv-i.cc
@@ -0,0 +1,351 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/base-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
// LUI: U-type, rd = imm20 << 12 (load upper immediate).
void AssemblerRISCVI::lui(Register rd, int32_t imm20) {
  GenInstrU(LUI, rd, imm20);
}

// AUIPC: U-type, rd = pc + (imm20 << 12); basis for pc-relative addressing.
void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
  GenInstrU(AUIPC, rd, imm20);
}

// Jumps

// JAL: J-type. imm21 is the signed pc-relative byte offset (bit 0 implied
// zero); rd receives the return address.
void AssemblerRISCVI::jal(Register rd, int32_t imm21) {
  GenInstrJ(JAL, rd, imm21);
}

// JALR: I-type indirect jump-and-link; target is rs1 + sign-extended imm12.
void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
  GenInstrI(0b000, JALR, rd, rs1, imm12);
}
+
// Branches
// B-type conditional branches: funct3 selects the comparison of rs1 vs rs2;
// imm13 is the signed pc-relative byte offset (bit 0 implicitly zero).

void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);  // taken if rs1 == rs2
}

void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);  // taken if rs1 != rs2
}

void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);  // signed rs1 < rs2
}

void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);  // signed rs1 >= rs2
}

void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);  // unsigned rs1 < rs2
}

void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
  GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);  // unsigned rs1 >= rs2
}
+
// Loads
// I-type loads: rd = mem[rs1 + sign-extended imm12]; funct3 encodes the
// access width and extension mode.

void AssemblerRISCVI::lb(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b000, rd, rs1, imm12);  // byte, sign-extended
}

void AssemblerRISCVI::lh(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b001, rd, rs1, imm12);  // halfword, sign-extended
}

void AssemblerRISCVI::lw(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b010, rd, rs1, imm12);  // word, sign-extended
}

void AssemblerRISCVI::lbu(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b100, rd, rs1, imm12);  // byte, zero-extended
}

void AssemblerRISCVI::lhu(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b101, rd, rs1, imm12);  // halfword, zero-extended
}

// Stores
// S-type stores: mem[base + sign-extended imm12] = source. Note the operand
// swap: callers pass (source, base) but the encoder expects (funct3,
// rs1 = base, rs2 = source).

void AssemblerRISCVI::sb(Register source, Register base, int16_t imm12) {
  GenInstrStore_rri(0b000, base, source, imm12);  // byte
}

void AssemblerRISCVI::sh(Register source, Register base, int16_t imm12) {
  GenInstrStore_rri(0b001, base, source, imm12);  // halfword
}

void AssemblerRISCVI::sw(Register source, Register base, int16_t imm12) {
  GenInstrStore_rri(0b010, base, source, imm12);  // word
}
+
// Arithmetic with immediate
// I-type ALU ops: rd = rs1 OP sign-extended(imm12).

void AssemblerRISCVI::addi(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b000, rd, rs1, imm12);
}

void AssemblerRISCVI::slti(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b010, rd, rs1, imm12);  // signed set-less-than
}

void AssemblerRISCVI::sltiu(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b011, rd, rs1, imm12);  // unsigned set-less-than
}

void AssemblerRISCVI::xori(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b100, rd, rs1, imm12);
}

void AssemblerRISCVI::ori(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b110, rd, rs1, imm12);
}

void AssemblerRISCVI::andi(Register rd, Register rs1, int16_t imm12) {
  GenInstrALU_ri(0b111, rd, rs1, imm12);
}

// Shift-immediates. shamt is masked to 6 bits (RV64 shift amounts are
// 0..63); the first argument flags an arithmetic (sign-propagating) shift.

void AssemblerRISCVI::slli(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
}

void AssemblerRISCVI::srli(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
}

void AssemblerRISCVI::srai(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
}
+
// Arithmetic
// R-type register-register ALU ops: rd = rs1 OP rs2. funct7 0b0100000
// selects the alternate operation (SUB rather than ADD, SRA rather than SRL).

void AssemblerRISCVI::add(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::sub(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::sll(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVI::slt(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);  // signed set-less-than
}

void AssemblerRISCVI::sltu(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);  // unsigned set-less-than
}

void AssemblerRISCVI::xor_(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
}

void AssemblerRISCVI::srl(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVI::sra(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVI::or_(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
}

void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) {
  GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
}
+
// Memory fences

// FENCE: pred/succ are 4-bit sets of predecessor/successor access types.
// They are packed into the I-type immediate as fm(=0000) | pred | succ,
// with x0 for both rd and rs1.
void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) {
  MOZ_ASSERT(is_uint4(pred) && is_uint4(succ));
  uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
  GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
}

// FENCE.TSO: fm=1000 with pred=succ=RW (total-store-order fence).
void AssemblerRISCVI::fence_tso() {
  uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
  GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
}

// Environment call / break

// ECALL: SYSTEM opcode with imm=0 — trap into the execution environment.
void AssemblerRISCVI::ecall() {
  GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 0);
}

// EBREAK: SYSTEM opcode with imm=1 — breakpoint trap.
void AssemblerRISCVI::ebreak() {
  GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 1);
}

// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
// instruction (i.e., it should always trap, if your implementation has invalid
// instruction traps).
// Encodes `csrrw x0, cycle, x0` (funct3=0b001, csr=0xC00): writing the
// read-only cycle CSR raises an illegal-instruction exception.
void AssemblerRISCVI::unimp() {
  GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
}
+
+bool AssemblerRISCVI::IsBranch(Instr instr) {
+ return (instr & kBaseOpcodeMask) == BRANCH;
+}
+
+bool AssemblerRISCVI::IsJump(Instr instr) {
+ int Op = instr & kBaseOpcodeMask;
+ return Op == JAL || Op == JALR;
+}
+
+bool AssemblerRISCVI::IsNop(Instr instr) { return instr == kNopByte; }
+
+bool AssemblerRISCVI::IsJal(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JAL;
+}
+
+bool AssemblerRISCVI::IsJalr(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JALR;
+}
+
+bool AssemblerRISCVI::IsLui(Instr instr) {
+ return (instr & kBaseOpcodeMask) == LUI;
+}
+bool AssemblerRISCVI::IsAuipc(Instr instr) {
+ return (instr & kBaseOpcodeMask) == AUIPC;
+}
+bool AssemblerRISCVI::IsAddi(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
+}
+bool AssemblerRISCVI::IsOri(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
+}
+bool AssemblerRISCVI::IsSlli(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
+}
+
// Decodes the 21-bit signed pc-relative offset of a J-type (JAL)
// instruction, whose immediate is scattered as imm[20|10:1|11|19:12].
// The trailing `<< 11 >> 11` arithmetic-shift pair sign-extends from bit 20.
int AssemblerRISCVI::JumpOffset(Instr instr) {
  int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
                  (instr & 0xff000) | ((instr & 0x80000000) >> 11);
  imm21 = imm21 << 11 >> 11;
  return imm21;
}

// Decodes the sign-extended 12-bit immediate of a JALR (the arithmetic
// >> 20 on the signed cast performs the sign extension).
int AssemblerRISCVI::JalrOffset(Instr instr) {
  MOZ_ASSERT(IsJalr(instr));
  int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
  return imm12;
}

// Returns the AUIPC immediate still in its in-instruction position (i.e.
// already shifted left by 12, not the raw 20-bit field) — BrachlongOffset
// relies on this when it adds a 12-bit low offset directly to the result.
int AssemblerRISCVI::AuipcOffset(Instr instr) {
  MOZ_ASSERT(IsAuipc(instr));
  int32_t imm20 = static_cast<int32_t>(instr & kImm20Mask);
  return imm20;
}
+
+bool AssemblerRISCVI::IsLw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LW;
+}
+
+int AssemblerRISCVI::LoadOffset(Instr instr) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(IsLd(instr));
+#elif V8_TARGET_ARCH_RISCV32
+ MOZ_ASSERT(IsLw(instr));
+#endif
+ int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
+ return imm12;
+}
+
#ifdef JS_CODEGEN_RISCV64

// RV64I-only instructions: doubleword loads/stores and the "W" forms that
// operate on the low 32 bits and produce a sign-extended result.

bool AssemblerRISCVI::IsAddiw(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
}

bool AssemblerRISCVI::IsLd(Instr instr) {
  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
}

void AssemblerRISCVI::lwu(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b110, rd, rs1, imm12);  // word, zero-extended
}

void AssemblerRISCVI::ld(Register rd, Register rs1, int16_t imm12) {
  GenInstrLoad_ri(0b011, rd, rs1, imm12);  // doubleword
}

void AssemblerRISCVI::sd(Register source, Register base, int16_t imm12) {
  // Same (source, base) -> (base, source) operand swap as sb/sh/sw.
  GenInstrStore_rri(0b011, base, source, imm12);
}

void AssemblerRISCVI::addiw(Register rd, Register rs1, int16_t imm12) {
  GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
}

// W-form shift-immediates: shamt is masked to 5 bits (0..31).

void AssemblerRISCVI::slliw(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
}

void AssemblerRISCVI::srliw(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
}

void AssemblerRISCVI::sraiw(Register rd, Register rs1, uint8_t shamt) {
  GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
}

// W-form register-register ops; funct7 0b0100000 selects SUBW/SRAW.

void AssemblerRISCVI::addw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::subw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
}

void AssemblerRISCVI::sllw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
}

void AssemblerRISCVI::srlw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
}

void AssemblerRISCVI::sraw(Register rd, Register rs1, Register rs2) {
  GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
}

#endif
+
// Decodes the 13-bit signed pc-relative offset of a B-type branch.
// | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
// 31          25                              11          7
// The trailing `<< 19 >> 19` arithmetic-shift pair sign-extends from bit 12.
int AssemblerRISCVI::BranchOffset(Instr instr) {
  int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
                  ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
  imm13 = imm13 << 19 >> 19;
  return imm13;
}
+
// Computes the total pc-relative offset encoded by an AUIPC + I-type pair
// (the long-branch sequence): the AUIPC immediate (already in its shifted
// position, see AuipcOffset) plus the sign-extended 12-bit immediate of the
// second instruction. Asserts that the pair is well-formed, i.e. the I-type
// instruction's rs1 is the register AUIPC wrote.
// NOTE(review): "Brachlong" is a long-standing typo for "Branchlong",
// inherited from V8; kept because the name is part of the interface.
int AssemblerRISCVI::BrachlongOffset(Instr auipc, Instr instr_I) {
  MOZ_ASSERT(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
             InstructionBase::kIType);
  MOZ_ASSERT(IsAuipc(auipc));
  MOZ_ASSERT(((auipc & kRdFieldMask) >> kRdShift) ==
             ((instr_I & kRs1FieldMask) >> kRs1Shift));
  int32_t imm_auipc = AuipcOffset(auipc);
  int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
  int32_t offset = imm12 + imm_auipc;
  return offset;
}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/base-riscv-i.h b/js/src/jit/riscv64/extension/base-riscv-i.h
new file mode 100644
index 0000000000..cca342c960
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-riscv-i.h
@@ -0,0 +1,273 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Base_riscv_i_h_
+#define jit_riscv64_extension_Base_riscv_i_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+namespace js {
+namespace jit {
+
+class AssemblerRISCVI : public AssemblerRiscvBase {
+ public:
+ void lui(Register rd, int32_t imm20);
+ void auipc(Register rd, int32_t imm20);
+
+ // Jumps
+ void jal(Register rd, int32_t imm20);
+ void jalr(Register rd, Register rs1, int16_t imm12);
+
+ // Branches
+ void beq(Register rs1, Register rs2, int16_t imm12);
+ void bne(Register rs1, Register rs2, int16_t imm12);
+ void blt(Register rs1, Register rs2, int16_t imm12);
+ void bge(Register rs1, Register rs2, int16_t imm12);
+ void bltu(Register rs1, Register rs2, int16_t imm12);
+ void bgeu(Register rs1, Register rs2, int16_t imm12);
+ // Loads
+ void lb(Register rd, Register rs1, int16_t imm12);
+ void lh(Register rd, Register rs1, int16_t imm12);
+ void lw(Register rd, Register rs1, int16_t imm12);
+ void lbu(Register rd, Register rs1, int16_t imm12);
+ void lhu(Register rd, Register rs1, int16_t imm12);
+
+ // Stores
+ void sb(Register source, Register base, int16_t imm12);
+ void sh(Register source, Register base, int16_t imm12);
+ void sw(Register source, Register base, int16_t imm12);
+
+ // Arithmetic with immediate
+ void addi(Register rd, Register rs1, int16_t imm12);
+ void slti(Register rd, Register rs1, int16_t imm12);
+ void sltiu(Register rd, Register rs1, int16_t imm12);
+ void xori(Register rd, Register rs1, int16_t imm12);
+ void ori(Register rd, Register rs1, int16_t imm12);
+ void andi(Register rd, Register rs1, int16_t imm12);
+ void slli(Register rd, Register rs1, uint8_t shamt);
+ void srli(Register rd, Register rs1, uint8_t shamt);
+ void srai(Register rd, Register rs1, uint8_t shamt);
+
+ // Arithmetic
+ void add(Register rd, Register rs1, Register rs2);
+ void sub(Register rd, Register rs1, Register rs2);
+ void sll(Register rd, Register rs1, Register rs2);
+ void slt(Register rd, Register rs1, Register rs2);
+ void sltu(Register rd, Register rs1, Register rs2);
+ void xor_(Register rd, Register rs1, Register rs2);
+ void srl(Register rd, Register rs1, Register rs2);
+ void sra(Register rd, Register rs1, Register rs2);
+ void or_(Register rd, Register rs1, Register rs2);
+ void and_(Register rd, Register rs1, Register rs2);
+
+ // Other pseudo instructions that are not part of RISCV pseudo assemly
+ void nor(Register rd, Register rs, Register rt) {
+ or_(rd, rs, rt);
+ not_(rd, rd);
+ }
+
+ // Memory fences
+ void fence(uint8_t pred, uint8_t succ);
+ void fence_tso();
+
+ // Environment call / break
+ void ecall();
+ void ebreak();
+
+ void sync() { fence(0b1111, 0b1111); }
+
+ // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+ // instruction (i.e., it should always trap, if your implementation has
+ // invalid instruction traps).
+ void unimp();
+
+ static int JumpOffset(Instr instr);
+ static int AuipcOffset(Instr instr);
+ static int JalrOffset(Instr instr);
+ static int LoadOffset(Instr instr);
+ static int BranchOffset(Instr instr);
+ static int BrachlongOffset(Instr auipc, Instr instr_I);
+ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t imm = target_pos - pos;
+ MOZ_ASSERT((imm & 1) == 0);
+ MOZ_ASSERT(is_intn(imm, kBranchOffsetBits));
+
+ instr &= ~kBImm12Mask;
+ int32_t imm12 = ((imm & 0x800) >> 4) | // bit 11
+ ((imm & 0x1e) << 7) | // bits 4-1
+ ((imm & 0x7e0) << 20) | // bits 10-5
+ ((imm & 0x1000) << 19); // bit 12
+
+ return instr | (imm12 & kBImm12Mask);
+ }
+
+ static inline Instr SetJalOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ MOZ_ASSERT(IsJal(instr));
+ int32_t imm = target_pos - pos;
+ MOZ_ASSERT((imm & 1) == 0);
+ MOZ_ASSERT(is_intn(imm, kJumpOffsetBits));
+
+ instr &= ~kImm20Mask;
+ int32_t imm20 = (imm & 0xff000) | // bits 19-12
+ ((imm & 0x800) << 9) | // bit 11
+ ((imm & 0x7fe) << 20) | // bits 10-1
+ ((imm & 0x100000) << 11); // bit 20
+
+ return instr | (imm20 & kImm20Mask);
+ }
+
+ static inline Instr SetJalrOffset(int32_t offset, Instr instr) {
+ MOZ_ASSERT(IsJalr(instr));
+ MOZ_ASSERT(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ MOZ_ASSERT(IsJalr(instr | (imm12 & kImm12Mask)));
+ MOZ_ASSERT(JalrOffset(instr | (imm12 & kImm12Mask)) == offset);
+ return instr | (imm12 & kImm12Mask);
+ }
+
+ static inline Instr SetLoadOffset(int32_t offset, Instr instr) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(IsLd(instr));
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(IsLw(instr));
+#endif
+ MOZ_ASSERT(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ return instr | (imm12 & kImm12Mask);
+ }
+
+ static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
+ MOZ_ASSERT(IsAuipc(instr));
+ MOZ_ASSERT(is_int20(offset));
+ instr = (instr & ~kImm31_12Mask) | ((offset & kImm19_0Mask) << 12);
+ return instr;
+ }
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsNop(Instr instr);
+ static bool IsJump(Instr instr);
+ static bool IsJal(Instr instr);
+ static bool IsJalr(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsAuipc(Instr instr);
+ static bool IsAddi(Instr instr);
+ static bool IsOri(Instr instr);
+ static bool IsSlli(Instr instr);
+ static bool IsLw(Instr instr);
+
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset13);
+ }
+ inline int32_t jump_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+
+ // Branches
+ void beq(Register rs1, Register rs2, Label* L) {
+ beq(rs1, rs2, branch_offset(L));
+ }
+ void bne(Register rs1, Register rs2, Label* L) {
+ bne(rs1, rs2, branch_offset(L));
+ }
+ void blt(Register rs1, Register rs2, Label* L) {
+ blt(rs1, rs2, branch_offset(L));
+ }
+ void bge(Register rs1, Register rs2, Label* L) {
+ bge(rs1, rs2, branch_offset(L));
+ }
+ void bltu(Register rs1, Register rs2, Label* L) {
+ bltu(rs1, rs2, branch_offset(L));
+ }
+ void bgeu(Register rs1, Register rs2, Label* L) {
+ bgeu(rs1, rs2, branch_offset(L));
+ }
+
+ void beqz(Register rs, int16_t imm13) { beq(rs, zero_reg, imm13); }
+ void beqz(Register rs1, Label* L) { beqz(rs1, branch_offset(L)); }
+ void bnez(Register rs, int16_t imm13) { bne(rs, zero_reg, imm13); }
+ void bnez(Register rs1, Label* L) { bnez(rs1, branch_offset(L)); }
+ void blez(Register rs, int16_t imm13) { bge(zero_reg, rs, imm13); }
+ void blez(Register rs1, Label* L) { blez(rs1, branch_offset(L)); }
+ void bgez(Register rs, int16_t imm13) { bge(rs, zero_reg, imm13); }
+ void bgez(Register rs1, Label* L) { bgez(rs1, branch_offset(L)); }
+ void bltz(Register rs, int16_t imm13) { blt(rs, zero_reg, imm13); }
+ void bltz(Register rs1, Label* L) { bltz(rs1, branch_offset(L)); }
+ void bgtz(Register rs, int16_t imm13) { blt(zero_reg, rs, imm13); }
+
+ void bgtz(Register rs1, Label* L) { bgtz(rs1, branch_offset(L)); }
+ void bgt(Register rs1, Register rs2, int16_t imm13) { blt(rs2, rs1, imm13); }
+ void bgt(Register rs1, Register rs2, Label* L) {
+ bgt(rs1, rs2, branch_offset(L));
+ }
+ void ble(Register rs1, Register rs2, int16_t imm13) { bge(rs2, rs1, imm13); }
+ void ble(Register rs1, Register rs2, Label* L) {
+ ble(rs1, rs2, branch_offset(L));
+ }
+ void bgtu(Register rs1, Register rs2, int16_t imm13) {
+ bltu(rs2, rs1, imm13);
+ }
+ void bgtu(Register rs1, Register rs2, Label* L) {
+ bgtu(rs1, rs2, branch_offset(L));
+ }
+ void bleu(Register rs1, Register rs2, int16_t imm13) {
+ bgeu(rs2, rs1, imm13);
+ }
+ void bleu(Register rs1, Register rs2, Label* L) {
+ bleu(rs1, rs2, branch_offset(L));
+ }
+
+ void j(int32_t imm21) { jal(zero_reg, imm21); }
+ void j(Label* L) { j(jump_offset(L)); }
+ void b(Label* L) { j(L); }
+ void jal(int32_t imm21) { jal(ra, imm21); }
+ void jal(Label* L) { jal(jump_offset(L)); }
+ void jr(Register rs) { jalr(zero_reg, rs, 0); }
+ void jr(Register rs, int32_t imm12) { jalr(zero_reg, rs, imm12); }
+ void jalr(Register rs, int32_t imm12) { jalr(ra, rs, imm12); }
+ void jalr(Register rs) { jalr(ra, rs, 0); }
+ void call(int32_t offset) {
+ auipc(ra, (offset >> 12) + ((offset & 0x800) >> 11));
+ jalr(ra, ra, offset << 20 >> 20);
+ }
+
+ void mv(Register rd, Register rs) { addi(rd, rs, 0); }
+ void not_(Register rd, Register rs) { xori(rd, rs, -1); }
+ void neg(Register rd, Register rs) { sub(rd, zero_reg, rs); }
+ void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
+ void snez(Register rd, Register rs) { sltu(rd, zero_reg, rs); }
+ void sltz(Register rd, Register rs) { slt(rd, rs, zero_reg); }
+ void sgtz(Register rd, Register rs) { slt(rd, zero_reg, rs); }
+
+#if JS_CODEGEN_RISCV64
+ void lwu(Register rd, Register rs1, int16_t imm12);
+ void ld(Register rd, Register rs1, int16_t imm12);
+ void sd(Register source, Register base, int16_t imm12);
+ void addiw(Register rd, Register rs1, int16_t imm12);
+ void slliw(Register rd, Register rs1, uint8_t shamt);
+ void srliw(Register rd, Register rs1, uint8_t shamt);
+ void sraiw(Register rd, Register rs1, uint8_t shamt);
+ void addw(Register rd, Register rs1, Register rs2);
+ void subw(Register rd, Register rs1, Register rs2);
+ void sllw(Register rd, Register rs1, Register rs2);
+ void srlw(Register rd, Register rs1, Register rs2);
+ void sraw(Register rd, Register rs1, Register rs2);
+ void negw(Register rd, Register rs) { subw(rd, zero_reg, rs); }
+ void sext_w(Register rd, Register rs) { addiw(rd, rs, 0); }
+
+ static bool IsAddiw(Instr instr);
+ static bool IsLd(Instr instr);
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
#endif  // jit_riscv64_extension_Base_riscv_i_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-a.cc b/js/src/jit/riscv64/extension/extension-riscv-a.cc
new file mode 100644
index 0000000000..ead355fc0a
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-a.cc
@@ -0,0 +1,123 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-a.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
// RV32A Standard Extension
// Word-sized (funct3=0b010) atomic memory operations. The first argument of
// GenInstrRAtomic is the 5-bit funct5 selecting the operation; aq/rl are the
// acquire/release memory-ordering bits of the encoding.

void AssemblerRISCVA::lr_w(bool aq, bool rl, Register rd, Register rs1) {
  // Load-reserved: rs2 is hard-wired to x0 in the encoding.
  GenInstrRAtomic(0b00010, aq, rl, 0b010, rd, rs1, zero_reg);
}

// Store-conditional: rd receives the success/failure code.
void AssemblerRISCVA::sc_w(bool aq, bool rl, Register rd, Register rs1,
                           Register rs2) {
  GenInstrRAtomic(0b00011, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoswap_w(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b00001, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoadd_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoxor_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00100, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoand_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b01100, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amoor_w(bool aq, bool rl, Register rd, Register rs1,
                              Register rs2) {
  GenInstrRAtomic(0b01000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amomin_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amomax_w(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10100, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amominu_w(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11000, aq, rl, 0b010, rd, rs1, rs2);
}

void AssemblerRISCVA::amomaxu_w(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11100, aq, rl, 0b010, rd, rs1, rs2);
}
+
// RV64A Standard Extension (in addition to RV32A)
#ifdef JS_CODEGEN_RISCV64
// Doubleword (funct3=0b011) variants of the atomics above; the funct5
// values match the corresponding _w forms.
void AssemblerRISCVA::lr_d(bool aq, bool rl, Register rd, Register rs1) {
  // Load-reserved: rs2 is hard-wired to x0 in the encoding.
  GenInstrRAtomic(0b00010, aq, rl, 0b011, rd, rs1, zero_reg);
}

// Store-conditional: rd receives the success/failure code.
void AssemblerRISCVA::sc_d(bool aq, bool rl, Register rd, Register rs1,
                           Register rs2) {
  GenInstrRAtomic(0b00011, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoswap_d(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b00001, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoadd_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoxor_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b00100, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoand_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b01100, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amoor_d(bool aq, bool rl, Register rd, Register rs1,
                              Register rs2) {
  GenInstrRAtomic(0b01000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amomin_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amomax_d(bool aq, bool rl, Register rd, Register rs1,
                               Register rs2) {
  GenInstrRAtomic(0b10100, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amominu_d(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11000, aq, rl, 0b011, rd, rs1, rs2);
}

void AssemblerRISCVA::amomaxu_d(bool aq, bool rl, Register rd, Register rs1,
                                Register rs2) {
  GenInstrRAtomic(0b11100, aq, rl, 0b011, rd, rs1, rs2);
}
#endif
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-a.h b/js/src/jit/riscv64/extension/extension-riscv-a.h
new file mode 100644
index 0000000000..442a4f5bba
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-a.h
@@ -0,0 +1,46 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_a_h_
+#define jit_riscv64_extension_Extension_riscv_a_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
// Emitters for the RISC-V "A" (atomic) standard extension: load-reserved /
// store-conditional pairs and read-modify-write AMOs. The aq/rl flags set
// the acquire/release memory-ordering bits of each encoding; definitions
// live in extension-riscv-a.cc.
class AssemblerRISCVA : public AssemblerRiscvBase {
  // RV32A Standard Extension
 public:
  void lr_w(bool aq, bool rl, Register rd, Register rs1);
  void sc_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoadd_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoxor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoand_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomin_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomax_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amominu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomaxu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);

#ifdef JS_CODEGEN_RISCV64
  // RV64A Standard Extension (in addition to RV32A)
  void lr_d(bool aq, bool rl, Register rd, Register rs1);
  void sc_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoswap_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoadd_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoxor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoand_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amoor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomin_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomax_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amominu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
  void amomaxu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
#endif
};
+} // namespace jit
+} // namespace js
#endif  // jit_riscv64_extension_Extension_riscv_a_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-c.cc b/js/src/jit/riscv64/extension/extension-riscv-c.cc
new file mode 100644
index 0000000000..714753a0e0
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-c.cc
@@ -0,0 +1,275 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-c.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV64C Standard Extension
+void AssemblerRISCVC::c_nop() { GenInstrCI(0b000, C1, zero_reg, 0); }
+
+void AssemblerRISCVC::c_addi(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg && imm6 != 0);
+ GenInstrCI(0b000, C1, rd, imm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_addiw(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg);
+ GenInstrCI(0b001, C1, rd, imm6);
+}
+#endif
+
+void AssemblerRISCVC::c_addi16sp(int16_t imm10) {
+ MOZ_ASSERT(is_int10(imm10) && (imm10 & 0xf) == 0);
+ uint8_t uimm6 = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) |
+ ((imm10 & 0x40) >> 3) | ((imm10 & 0x180) >> 6) |
+ ((imm10 & 0x20) >> 5);
+ GenInstrCIU(0b011, C1, sp, uimm6);
+}
+
+void AssemblerRISCVC::c_addi4spn(Register rd, int16_t uimm10) {
+ MOZ_ASSERT(is_uint10(uimm10) && (uimm10 != 0));
+ uint8_t uimm8 = ((uimm10 & 0x4) >> 1) | ((uimm10 & 0x8) >> 3) |
+ ((uimm10 & 0x30) << 2) | ((uimm10 & 0x3c0) >> 4);
+ GenInstrCIW(0b000, C0, rd, uimm8);
+}
+
+void AssemblerRISCVC::c_li(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg);
+ GenInstrCI(0b010, C1, rd, imm6);
+}
+
+void AssemblerRISCVC::c_lui(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg && rd != sp && imm6 != 0);
+ GenInstrCI(0b011, C1, rd, imm6);
+}
+
+void AssemblerRISCVC::c_slli(Register rd, uint8_t shamt6) {
+ MOZ_ASSERT(rd != zero_reg && shamt6 != 0);
+ GenInstrCIU(0b000, C2, rd, shamt6);
+}
+
+void AssemblerRISCVC::c_fldsp(FPURegister rd, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b001, C2, rd, uimm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_ldsp(Register rd, uint16_t uimm9) {
+ MOZ_ASSERT(rd != zero_reg && is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b011, C2, rd, uimm6);
+}
+#endif
+
+void AssemblerRISCVC::c_lwsp(Register rd, uint16_t uimm8) {
+ MOZ_ASSERT(rd != zero_reg && is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCIU(0b010, C2, rd, uimm6);
+}
+
+void AssemblerRISCVC::c_jr(Register rs1) {
+ MOZ_ASSERT(rs1 != zero_reg);
+ GenInstrCR(0b1000, C2, rs1, zero_reg);
+}
+
+void AssemblerRISCVC::c_mv(Register rd, Register rs2) {
+ MOZ_ASSERT(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1000, C2, rd, rs2);
+}
+
+void AssemblerRISCVC::c_ebreak() { GenInstrCR(0b1001, C2, zero_reg, zero_reg); }
+
+void AssemblerRISCVC::c_jalr(Register rs1) {
+ MOZ_ASSERT(rs1 != zero_reg);
+ GenInstrCR(0b1001, C2, rs1, zero_reg);
+}
+
+void AssemblerRISCVC::c_add(Register rd, Register rs2) {
+ MOZ_ASSERT(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1001, C2, rd, rs2);
+}
+
+// CA Instructions
+void AssemblerRISCVC::c_sub(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b00, rs2);
+}
+
+void AssemblerRISCVC::c_xor(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b01, rs2);
+}
+
+void AssemblerRISCVC::c_or(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b10, rs2);
+}
+
+void AssemblerRISCVC::c_and(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b11, rs2);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_subw(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b00, rs2);
+}
+
+void AssemblerRISCVC::c_addw(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b01, rs2);
+}
+#endif
+
+void AssemblerRISCVC::c_swsp(Register rs2, uint16_t uimm8) {
+ MOZ_ASSERT(is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCSS(0b110, C2, rs2, uimm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_sdsp(Register rs2, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b111, C2, rs2, uimm6);
+}
+#endif
+
+void AssemblerRISCVC::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b101, C2, rs2, uimm6);
+}
+
+// CL Instructions
+
+void AssemblerRISCVC::c_lw(Register rd, Register rs1, uint16_t uimm7) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCL(0b010, C0, rd, rs1, uimm5);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_ld(Register rd, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b011, C0, rd, rs1, uimm5);
+}
+#endif
+
+void AssemblerRISCVC::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b001, C0, rd, rs1, uimm5);
+}
+
+// CS Instructions
+
+void AssemblerRISCVC::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCS(0b110, C0, rs2, rs1, uimm5);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b111, C0, rs2, rs1, uimm5);
+}
+#endif
+
+void AssemblerRISCVC::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b101, C0, rs2, rs1, uimm5);
+}
+
+// CJ Instructions
+
+void AssemblerRISCVC::c_j(int16_t imm12) {
+ MOZ_ASSERT(is_int12(imm12));
+ int16_t uimm11 = ((imm12 & 0x800) >> 1) | ((imm12 & 0x400) >> 4) |
+ ((imm12 & 0x300) >> 1) | ((imm12 & 0x80) >> 3) |
+ ((imm12 & 0x40) >> 1) | ((imm12 & 0x20) >> 5) |
+ ((imm12 & 0x10) << 5) | (imm12 & 0xe);
+ GenInstrCJ(0b101, C1, uimm11);
+}
+
+// CB Instructions
+
+void AssemblerRISCVC::c_bnez(Register rs1, int16_t imm9) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b111, C1, rs1, uimm8);
+}
+
+void AssemblerRISCVC::c_beqz(Register rs1, int16_t imm9) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b110, C1, rs1, uimm8);
+}
+
+void AssemblerRISCVC::c_srli(Register rs1, int8_t shamt6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b00, C1, rs1, shamt6);
+}
+
+void AssemblerRISCVC::c_srai(Register rs1, int8_t shamt6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b01, C1, rs1, shamt6);
+}
+
+void AssemblerRISCVC::c_andi(Register rs1, int8_t imm6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
+ GenInstrCBA(0b100, 0b10, C1, rs1, imm6);
+}
+
+bool AssemblerRISCVC::IsCJal(Instr instr) {
+ return (instr & kRvcOpcodeMask) == RO_C_J;
+}
+
+bool AssemblerRISCVC::IsCBranch(Instr instr) {
+ int Op = instr & kRvcOpcodeMask;
+ return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
+}
+
+int AssemblerRISCVC::CJumpOffset(Instr instr) {
+ int32_t imm12 = ((instr & 0x4) << 3) | ((instr & 0x38) >> 2) |
+ ((instr & 0x40) << 1) | ((instr & 0x80) >> 1) |
+ ((instr & 0x100) << 2) | ((instr & 0x600) >> 1) |
+ ((instr & 0x800) >> 7) | ((instr & 0x1000) >> 1);
+ imm12 = imm12 << 20 >> 20;
+ return imm12;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-c.h b/js/src/jit/riscv64/extension/extension-riscv-c.h
new file mode 100644
index 0000000000..655141cb30
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-c.h
@@ -0,0 +1,77 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_c_h_
+#define jit_riscv64_extension_Extension_riscv_c_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVC : public AssemblerRiscvBase {
+ // RV64C Standard Extension
+ public:
+ void c_nop();
+ void c_addi(Register rd, int8_t imm6);
+
+ void c_addi16sp(int16_t imm10);
+ void c_addi4spn(Register rd, int16_t uimm10);
+ void c_li(Register rd, int8_t imm6);
+ void c_lui(Register rd, int8_t imm6);
+ void c_slli(Register rd, uint8_t shamt6);
+ void c_lwsp(Register rd, uint16_t uimm8);
+ void c_jr(Register rs1);
+ void c_mv(Register rd, Register rs2);
+ void c_ebreak();
+ void c_jalr(Register rs1);
+ void c_j(int16_t imm12);
+ void c_add(Register rd, Register rs2);
+ void c_sub(Register rd, Register rs2);
+ void c_and(Register rd, Register rs2);
+ void c_xor(Register rd, Register rs2);
+ void c_or(Register rd, Register rs2);
+ void c_swsp(Register rs2, uint16_t uimm8);
+ void c_lw(Register rd, Register rs1, uint16_t uimm7);
+ void c_sw(Register rs2, Register rs1, uint16_t uimm7);
+ void c_bnez(Register rs1, int16_t imm9);
+ void c_beqz(Register rs1, int16_t imm9);
+ void c_srli(Register rs1, int8_t shamt6);
+ void c_srai(Register rs1, int8_t shamt6);
+ void c_andi(Register rs1, int8_t imm6);
+
+ void c_fld(FPURegister rd, Register rs1, uint16_t uimm8);
+ void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8);
+ void c_fldsp(FPURegister rd, uint16_t uimm9);
+ void c_fsdsp(FPURegister rs2, uint16_t uimm9);
+#ifdef JS_CODEGEN_RISCV64
+ void c_ld(Register rd, Register rs1, uint16_t uimm8);
+ void c_sd(Register rs2, Register rs1, uint16_t uimm8);
+ void c_subw(Register rd, Register rs2);
+ void c_addw(Register rd, Register rs2);
+ void c_addiw(Register rd, int8_t imm6);
+ void c_ldsp(Register rd, uint16_t uimm9);
+ void c_sdsp(Register rs2, uint16_t uimm9);
+#endif
+
+ int CJumpOffset(Instr instr);
+
+ static bool IsCBranch(Instr instr);
+ static bool IsCJal(Instr instr);
+
+ inline int16_t cjump_offset(Label* L) {
+ return (int16_t)branch_offset_helper(L, OffsetSize::kOffset11);
+ }
+ inline int32_t cbranch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset9);
+ }
+
+ void c_j(Label* L) { c_j(cjump_offset(L)); }
+ void c_bnez(Register rs1, Label* L) { c_bnez(rs1, cbranch_offset(L)); }
+ void c_beqz(Register rs1, Label* L) { c_beqz(rs1, cbranch_offset(L)); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_c_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-d.cc b/js/src/jit/riscv64/extension/extension-riscv-d.cc
new file mode 100644
index 0000000000..cb728baf12
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-d.cc
@@ -0,0 +1,167 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-d.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV32D Standard Extension
+
+void AssemblerRISCVD::fld(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVD::fsd(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b011, base, source, imm12);
+}
+
+void AssemblerRISCVD::fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000001, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000101, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001001, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001101, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsqrt_d(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0101101, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fsgnj_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsgnjn_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsgnjx_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fcvt_s_d(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVD::fcvt_d_s(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0100001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::feq_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::flt_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fle_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fclass_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b001, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_w_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_wu_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVD::fcvt_d_w(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_d_wu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1));
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64D Standard Extension (in addition to RV32D)
+
+void AssemblerRISCVD::fcvt_l_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVD::fcvt_lu_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVD::fmv_x_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b000, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_d_l(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVD::fcvt_d_lu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVD::fmv_d_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111001, 0b000, rd, rs1, zero_reg);
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-d.h b/js/src/jit/riscv64/extension/extension-riscv-d.h
new file mode 100644
index 0000000000..8497c0ca63
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-d.h
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_d_h_
+#define jit_riscv64_extension_Extension_riscv_d_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVD : public AssemblerRiscvBase {
+ // RV32D Standard Extension
+ public:
+ void fld(FPURegister rd, Register rs1, int16_t imm12);
+ void fsd(FPURegister source, Register base, int16_t imm12);
+ void fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsqrt_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_s_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void feq_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_d(Register rd, FPURegister rs1);
+ void fcvt_w_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_wu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64D Standard Extension (in addition to RV32D)
+ void fcvt_l_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_lu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fmv_x_d(Register rd, FPURegister rs1);
+ void fcvt_d_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fmv_d_x(FPURegister rd, Register rs1);
+#endif
+
+ void fmv_d(FPURegister rd, FPURegister rs) { fsgnj_d(rd, rs, rs); }
+ void fabs_d(FPURegister rd, FPURegister rs) { fsgnjx_d(rd, rs, rs); }
+ void fneg_d(FPURegister rd, FPURegister rs) { fsgnjn_d(rd, rs, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_d_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-f.cc b/js/src/jit/riscv64/extension/extension-riscv-f.cc
new file mode 100644
index 0000000000..44e1fdc495
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-f.cc
@@ -0,0 +1,158 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-f.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+// RV32F Standard Extension
+
+void AssemblerRISCVF::flw(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVF::fsw(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b010, base, source, imm12);
+}
+
+void AssemblerRISCVF::fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000000, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000100, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001000, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001100, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsqrt_s(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0101100, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fsgnj_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsgnjn_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsgnjx_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fcvt_w_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_wu_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVF::fmv_x_w(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b000, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::feq_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::flt_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fle_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fclass_s(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b001, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_s_w(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_s_wu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVF::fmv_w_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111000, 0b000, rd, rs1, zero_reg);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64F Standard Extension (in addition to RV32F)
+
+void AssemblerRISCVF::fcvt_l_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVF::fcvt_lu_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVF::fcvt_s_l(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVF::fcvt_s_lu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(3));
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-f.h b/js/src/jit/riscv64/extension/extension-riscv-f.h
new file mode 100644
index 0000000000..3ab46ffcf6
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-f.h
@@ -0,0 +1,66 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_f_h_
+#define jit_riscv64_extension_Extension_riscv_f_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVF : public AssemblerRiscvBase {
+ // RV32F Standard Extension
+ public:
+ void flw(FPURegister rd, Register rs1, int16_t imm12);
+ void fsw(FPURegister source, Register base, int16_t imm12);
+ void fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsqrt_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_w_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_wu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fmv_x_w(Register rd, FPURegister rs1);
+ void feq_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_s(Register rd, FPURegister rs1);
+ void fcvt_s_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fmv_w_x(FPURegister rd, Register rs1);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64F Standard Extension (in addition to RV32F)
+ void fcvt_l_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_lu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+#endif
+
+ void fmv_s(FPURegister rd, FPURegister rs) { fsgnj_s(rd, rs, rs); }
+ void fabs_s(FPURegister rd, FPURegister rs) { fsgnjx_s(rd, rs, rs); }
+ void fneg_s(FPURegister rd, FPURegister rs) { fsgnjn_s(rd, rs, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_f_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-m.cc b/js/src/jit/riscv64/extension/extension-riscv-m.cc
new file mode 100644
index 0000000000..b5fcd6c34c
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-m.cc
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-m.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV32M Standard Extension
+
+void AssemblerRISCVM::mul(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulh(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulhsu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulhu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::div(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::rem(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64M Standard Extension (in addition to RV32M)
+
+void AssemblerRISCVM::mulw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
+#endif
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-m.h b/js/src/jit/riscv64/extension/extension-riscv-m.h
new file mode 100644
index 0000000000..7c2c932516
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-m.h
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_m_h_
+#define jit_riscv64_extension_Extension_riscv_m_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVM : public AssemblerRiscvBase {
+ // RV32M Standard Extension
+ public:
+ void mul(Register rd, Register rs1, Register rs2);
+ void mulh(Register rd, Register rs1, Register rs2);
+ void mulhsu(Register rd, Register rs1, Register rs2);
+ void mulhu(Register rd, Register rs1, Register rs2);
+ void div(Register rd, Register rs1, Register rs2);
+ void divu(Register rd, Register rs1, Register rs2);
+ void rem(Register rd, Register rs1, Register rs2);
+ void remu(Register rd, Register rs1, Register rs2);
+#ifdef JS_CODEGEN_RISCV64
+ // RV64M Standard Extension (in addition to RV32M)
+ void mulw(Register rd, Register rs1, Register rs2);
+ void divw(Register rd, Register rs1, Register rs2);
+ void divuw(Register rd, Register rs1, Register rs2);
+ void remw(Register rd, Register rs1, Register rs2);
+ void remuw(Register rd, Register rs1, Register rs2);
+#endif
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_m_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-v.cc b/js/src/jit/riscv64/extension/extension-riscv-v.cc
new file mode 100644
index 0000000000..c7241158e0
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-v.cc
@@ -0,0 +1,891 @@
+
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "jit/riscv64/extension/extension-riscv-v.h"
+
+#ifdef CAN_USE_RVV
+# include "src/codegen/assembler.h"  // FIXME: stale V8-internal path; does not exist in this tree (latent — only compiled under CAN_USE_RVV)
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/extension/register-riscv.h"
+
+namespace js {
+namespace jit {
+
+// RVV
+
+void AssemblerRISCVV::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vmv_vv(VRegister vd, VRegister vs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_vx(VRegister vd, Register rs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_vi(VRegister vd, uint8_t simm5) {
+ GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_xs(Register rd, VRegister vs2) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask);
+}
+
+void AssemblerRISCVV::vmv_sx(VRegister vd, Register rs1) {
+ GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs1);
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
+}
+
+void AssemblerRISCVV::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+ GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vid_v(VRegister vd, MaskType mask) {
+ GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask);
+}
+
+# define DEFINE_OPIVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFWV(name, funct6) \
+ void AssemblerRISCVV::name##_wv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFRED(name, funct6) \
+ void AssemblerRISCVV::name##_vs(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPIVX(name, funct6) \
+ void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \
+ }
+
+# define DEFINE_OPIVI(name, funct6) \
+ void AssemblerRISCVV::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask) { \
+ GenInstrV(funct6, vd, imm5, vs2, mask); \
+ }
+
+# define DEFINE_OPMVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+// void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, Register
+// rs1,
+// VRegister vs2, MaskType mask = NoMask);
+# define DEFINE_OPMVX(name, funct6) \
+ void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVF(name, funct6) \
+ void AssemblerRISCVV::name##_vf(VRegister vd, VRegister vs2, \
+ FPURegister fs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFWF(name, funct6) \
+ void AssemblerRISCVV::name##_wf(VRegister vd, VRegister vs2, \
+ FPURegister fs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVV_FMA(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs1, \
+ VRegister vs2, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVF_FMA(name, funct6) \
+ void AssemblerRISCVV::name##_vf(VRegister vd, FPURegister fs1, \
+ VRegister vs2, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+// vector integer extension
+# define DEFINE_OPMVV_VIE(name, vs1) \
+ void AssemblerRISCVV::name(VRegister vd, VRegister vs2, MaskType mask) { \
+ GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+void AssemblerRISCVV::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
+ GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
+}
+
+void AssemblerRISCVV::vfmv_fs(FPURegister fd, VRegister vs2) {
+ GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
+}
+
+void AssemblerRISCVV::vfmv_sf(VRegister vd, FPURegister fs) {
+ GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
+}
+
+DEFINE_OPIVV(vadd, VADD_FUNCT6)
+DEFINE_OPIVX(vadd, VADD_FUNCT6)
+DEFINE_OPIVI(vadd, VADD_FUNCT6)
+DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
+DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+DEFINE_OPIVV(vand, VAND_FUNCT6)
+DEFINE_OPIVX(vand, VAND_FUNCT6)
+DEFINE_OPIVI(vand, VAND_FUNCT6)
+DEFINE_OPIVV(vor, VOR_FUNCT6)
+DEFINE_OPIVX(vor, VOR_FUNCT6)
+DEFINE_OPIVI(vor, VOR_FUNCT6)
+DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+
+DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
+DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
+
+DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
+DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+
+// Vector Widening Floating-Point Add/Subtract Instructions
+DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+// Vector Widening Floating-Point Reduction Instructions
+DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+// Vector Widening Floating-Point Multiply
+DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
+DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
+DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
+// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
+DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
+DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
+DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
+DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+
+// Vector Widening Floating-Point Fused Multiply-Add Instructions
+DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
+// Vector Narrowing Fixed-Point Clip Instructions
+DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
+DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
+DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+
+// Vector Integer Extension
+DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
+DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
+DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
+DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
+DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
+DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)
+
+# undef DEFINE_OPIVI
+# undef DEFINE_OPIVV
+# undef DEFINE_OPIVX
+# undef DEFINE_OPFVV
+# undef DEFINE_OPFWV
+# undef DEFINE_OPFVF
+# undef DEFINE_OPFWF
+# undef DEFINE_OPFVV_FMA
+# undef DEFINE_OPFVF_FMA
+# undef DEFINE_OPMVV_VIE
+
+void AssemblerRISCVV::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
+ emit(instr);
+}
+
+void AssemblerRISCVV::vsetivli(Register rd, uint8_t uimm, VSew vsew,
+ Vlmul vlmul, TailAgnosticType tail,
+ MaskAgnosticType mask) {
+ MOZ_ASSERT(is_uint5(uimm));
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((uimm & 0x1F) << kRvvUimmShift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
+ emit(instr);
+}
+
+void AssemblerRISCVV::vsetvl(Register rd, Register rs1, Register rs2) {
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
+ emit(instr);
+}
+
+uint8_t vsew_switch(VSew vsew) {
+ uint8_t width;
+ switch (vsew) {
+ case E8:
+ width = 0b000;
+ break;
+ case E16:
+ width = 0b101;
+ break;
+ case E32:
+ width = 0b110;
+ break;
+ default:
+ width = 0b111;
+ break;
+ }
+ return width;
+}
+
+// OPIVV OPFVV OPMVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, int8_t vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPMVV OPFVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ Register rd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPFVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ FPURegister fd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((fd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPIVX OPMVX
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, Register rs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_IVX || opcode == OP_MVX);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPFVF
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, FPURegister fs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_FVF);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((fs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPMVX
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Register rd, Register rs1,
+ VRegister vs2, MaskType mask) {
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPIVI
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
+ VRegister vs2, MaskType mask) {
+ MOZ_ASSERT(is_uint5(imm5) || is_int5(imm5));
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// VL VS
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, uint8_t umop, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, Register rs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// VL VS AMO
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// vmv_xs vcpop_m vfirst_m
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ Register rd, uint8_t vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+void AssemblerRISCVV::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
+}
+void AssemblerRISCVV::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
+}
+void AssemblerRISCVV::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
+}
+
+void AssemblerRISCVV::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
+}
+void AssemblerRISCVV::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
+}
+
+void AssemblerRISCVV::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
+}
+void AssemblerRISCVV::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
+}
+
+void AssemblerRISCVV::vlseg2(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
+}
+
+void AssemblerRISCVV::vlseg3(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
+}
+
+void AssemblerRISCVV::vlseg4(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
+}
+
+void AssemblerRISCVV::vlseg5(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
+}
+
+void AssemblerRISCVV::vlseg6(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
+}
+
+void AssemblerRISCVV::vlseg7(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
+}
+
+void AssemblerRISCVV::vlseg8(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
+}
+void AssemblerRISCVV::vsseg2(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
+}
+void AssemblerRISCVV::vsseg3(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
+}
+void AssemblerRISCVV::vsseg4(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
+}
+void AssemblerRISCVV::vsseg5(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
+}
+void AssemblerRISCVV::vsseg6(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
+}
+void AssemblerRISCVV::vsseg7(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
+}
+void AssemblerRISCVV::vsseg8(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
+}
+
+void AssemblerRISCVV::vlsseg2(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
+}
+void AssemblerRISCVV::vlsseg3(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
+}
+void AssemblerRISCVV::vlsseg4(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
+}
+void AssemblerRISCVV::vlsseg5(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
+}
+void AssemblerRISCVV::vlsseg6(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
+}
+void AssemblerRISCVV::vlsseg7(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
+}
+void AssemblerRISCVV::vlsseg8(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
+}
+void AssemblerRISCVV::vssseg2(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
+}
+void AssemblerRISCVV::vssseg3(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
+}
+void AssemblerRISCVV::vssseg4(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
+}
+void AssemblerRISCVV::vssseg5(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
+}
+void AssemblerRISCVV::vssseg6(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
+}
+void AssemblerRISCVV::vssseg7(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
+}
+void AssemblerRISCVV::vssseg8(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
+}
+
+void AssemblerRISCVV::vlxseg2(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
+}
+void AssemblerRISCVV::vlxseg3(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
+}
+void AssemblerRISCVV::vlxseg4(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
+}
+void AssemblerRISCVV::vlxseg5(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
+}
+void AssemblerRISCVV::vlxseg6(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
+}
+void AssemblerRISCVV::vlxseg7(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
+}
+void AssemblerRISCVV::vlxseg8(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
+}
+void AssemblerRISCVV::vsxseg2(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
+}
+void AssemblerRISCVV::vsxseg3(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
+}
+void AssemblerRISCVV::vsxseg4(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
+}
+void AssemblerRISCVV::vsxseg5(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
+}
+void AssemblerRISCVV::vsxseg6(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
+}
+void AssemblerRISCVV::vsxseg7(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
+}
+void AssemblerRISCVV::vsxseg8(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
+}
+
+void AssemblerRISCVV::vfirst_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10001, vs2, mask);
+}
+
+void AssemblerRISCVV::vcpop_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10000, vs2, mask);
+}
+
+LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
+ uint8_t laneidx) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
+ break;
+ case MachineRepresentation::kWord16:
+ *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
+ break;
+ case MachineRepresentation::kWord32:
+ *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
+ break;
+ case MachineRepresentation::kWord64:
+ *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace jit
+} // namespace js
+#endif
diff --git a/js/src/jit/riscv64/extension/extension-riscv-v.h b/js/src/jit/riscv64/extension/extension-riscv-v.h
new file mode 100644
index 0000000000..8f04f24c56
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-v.h
@@ -0,0 +1,484 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_v_h_
+#define jit_riscv64_extension_Extension_riscv_v_h_
+#ifdef CAN_USE_RVV
+# include "jit/riscv64/Architecture-riscv64.h"
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/extension/base-assembler-riscv.h"
+
+namespace js {
+namespace jit {
+
+class AssemblerRISCVV : public AssemblerRiscvBase {
+ public:
+ // RVV
+ static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
+ }
+
+ void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+# define SegInstr(OP) \
+ void OP##seg2(ARG); \
+ void OP##seg3(ARG); \
+ void OP##seg4(ARG); \
+ void OP##seg5(ARG); \
+ void OP##seg6(ARG); \
+ void OP##seg7(ARG); \
+ void OP##seg8(ARG);
+
+# define ARG \
+ VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vl) SegInstr(vs)
+# undef ARG
+
+# define ARG \
+ VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vls) SegInstr(vss)
+# undef ARG
+
+# define ARG \
+ VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vsx) SegInstr(vlx)
+# undef ARG
+# undef SegInstr
+
+ // RVV Vector Arithmetic Instruction
+
+ void vmv_vv(VRegister vd, VRegister vs1);
+ void vmv_vx(VRegister vd, Register rs1);
+ void vmv_vi(VRegister vd, uint8_t simm5);
+ void vmv_xs(Register rd, VRegister vs2);
+ void vmv_sx(VRegister vd, Register rs1);
+ void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+
+ void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
+ void vfmv_fs(FPURegister fd, VRegister vs2);
+ void vfmv_sf(VRegister vd, FPURegister fs);
+
+ void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask = NoMask);
+ void vid_v(VRegister vd, MaskType mask = Mask);
+
+# define DEFINE_OPIVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPIVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPIVI(name, funct6) \
+ void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFWV(name, funct6) \
+ void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFRED(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVF(name, funct6) \
+ void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFWF(name, funct6) \
+ void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVV_FMA(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVF_FMA(name, funct6) \
+ void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVV_VIE(name) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);
+
+ DEFINE_OPIVV(vadd, VADD_FUNCT6)
+ DEFINE_OPIVX(vadd, VADD_FUNCT6)
+ DEFINE_OPIVI(vadd, VADD_FUNCT6)
+ DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+ DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+ DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+ DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+ DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
+ DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVV(vand, VAND_FUNCT6)
+ DEFINE_OPIVX(vand, VAND_FUNCT6)
+ DEFINE_OPIVI(vand, VAND_FUNCT6)
+ DEFINE_OPIVV(vor, VOR_FUNCT6)
+ DEFINE_OPIVX(vor, VOR_FUNCT6)
+ DEFINE_OPIVI(vor, VOR_FUNCT6)
+ DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+ DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+ DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+ DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+ DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+ DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+ DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+ DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+ DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+ DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+ DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+ DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+ DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+ DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+ DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+ DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
+ DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+ DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+ DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
+
+ DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+ DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+ DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+ // Vector Widening Floating-Point Reduction Instructions
+ DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+ DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+ // Vector Widening Floating-Point Multiply
+ DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+ DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
+ DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+ DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+ DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+ DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
+ DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
+ DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
+ DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+ DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
+ // Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+ DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
+ // Vector Narrowing Fixed-Point Clip Instructions
+ DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
+ DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
+ DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+
+ // Vector Integer Extension
+ DEFINE_OPMVV_VIE(vzext_vf8)
+ DEFINE_OPMVV_VIE(vsext_vf8)
+ DEFINE_OPMVV_VIE(vzext_vf4)
+ DEFINE_OPMVV_VIE(vsext_vf4)
+ DEFINE_OPMVV_VIE(vzext_vf2)
+ DEFINE_OPMVV_VIE(vsext_vf2)
+
+# undef DEFINE_OPIVI
+# undef DEFINE_OPIVV
+# undef DEFINE_OPIVX
+# undef DEFINE_OPMVV
+# undef DEFINE_OPMVX
+# undef DEFINE_OPFVV
+# undef DEFINE_OPFWV
+# undef DEFINE_OPFVF
+# undef DEFINE_OPFWF
+# undef DEFINE_OPFVV_FMA
+# undef DEFINE_OPFVF_FMA
+# undef DEFINE_OPMVV_VIE
+# undef DEFINE_OPFRED
+
+# define DEFINE_VFUNARY(name, funct6, vs1) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+ DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V)
+ DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
+ DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
+ DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
+ DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
+ DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
+ DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)
+
+ DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
+ DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
+ DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)
+
+ DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
+ DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
+ DEFINE_VFUNARY(vfrsqrt7_v, VFUNARY1_FUNCT6, VFRSQRT7_V)
+ DEFINE_VFUNARY(vfrec7_v, VFUNARY1_FUNCT6, VFREC7_V)
+# undef DEFINE_VFUNARY
+
+ void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vxor_vi(dst, src, -1, mask);
+ }
+
+ void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vrsub_vx(dst, src, zero_reg, mask);
+ }
+
+ void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjn_vv(dst, src, src, mask);
+ }
+ void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjx_vv(dst, src, src, mask);
+ }
+ void vfirst_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
+ void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
+ protected:
+ void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(rd, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ void vsetvl(Register rd, Register rs1, Register rs2);
+
+ // ----------------------------RVV------------------------------------------
+ // vsetvl
+ void GenInstrV(Register rd, Register rs1, Register rs2);
+ // vsetvli
+ void GenInstrV(Register rd, Register rs1, uint32_t zimm);
+ // OPIVV OPFVV OPMVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, int8_t vs1,
+ VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPMVV OPFVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ // OPFVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, FPURegister fd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+
+ // OPIVX OPMVX
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask = NoMask);
+ // OPFVF
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ FPURegister fs1, VRegister vs2, MaskType mask = NoMask);
+ // OPMVX
+ void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
+ MaskType mask = NoMask);
+ // OPIVI
+ void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
+ MaskType mask = NoMask);
+
+ // VL VS
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // VL VS AMO
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // vmv_xs vcpop_m vfirst_m
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd, uint8_t vs1,
+ VRegister vs2, MaskType mask);
+};
+
+class LoadStoreLaneParams {
+ public:
+ int sz;
+ uint8_t laneidx;
+
+ LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
+
+ private:
+ LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
+ : sz(sz), laneidx(laneidx % lanes) {}
+};
+} // namespace jit
+} // namespace js
+#endif
+#endif  // jit_riscv64_extension_Extension_riscv_v_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc b/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc
new file mode 100644
index 0000000000..7fa87393a3
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc
@@ -0,0 +1,44 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-zicsr.h"
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Register-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVZicsr::csrrw(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b001, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrs(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b010, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrc(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b011, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrwi(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b101, rd, csr, imm5);
+}
+
+void AssemblerRISCVZicsr::csrrsi(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b110, rd, csr, imm5);
+}
+
+void AssemblerRISCVZicsr::csrrci(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b111, rd, csr, imm5);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zicsr.h b/js/src/jit/riscv64/extension/extension-riscv-zicsr.h
new file mode 100644
index 0000000000..e1fba4fa57
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zicsr.h
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_zicsr_h_
+#define jit_riscv64_extension_Extension_riscv_zicsr_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+
+class AssemblerRISCVZicsr : public AssemblerRiscvBase {
+ public:
+ // CSR
+ void csrrw(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrs(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrc(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrci(Register rd, ControlStatusReg csr, uint8_t imm5);
+
+ // Read instructions-retired counter
+ void rdinstret(Register rd) { csrrs(rd, csr_instret, zero_reg); }
+ void rdinstreth(Register rd) { csrrs(rd, csr_instreth, zero_reg); }
+ void rdcycle(Register rd) { csrrs(rd, csr_cycle, zero_reg); }
+ void rdcycleh(Register rd) { csrrs(rd, csr_cycleh, zero_reg); }
+ void rdtime(Register rd) { csrrs(rd, csr_time, zero_reg); }
+ void rdtimeh(Register rd) { csrrs(rd, csr_timeh, zero_reg); }
+
+ void csrr(Register rd, ControlStatusReg csr) { csrrs(rd, csr, zero_reg); }
+ void csrw(ControlStatusReg csr, Register rs) { csrrw(zero_reg, csr, rs); }
+ void csrs(ControlStatusReg csr, Register rs) { csrrs(zero_reg, csr, rs); }
+ void csrc(ControlStatusReg csr, Register rs) { csrrc(zero_reg, csr, rs); }
+
+ void csrwi(ControlStatusReg csr, uint8_t imm) { csrrwi(zero_reg, csr, imm); }
+ void csrsi(ControlStatusReg csr, uint8_t imm) { csrrsi(zero_reg, csr, imm); }
+ void csrci(ControlStatusReg csr, uint8_t imm) { csrrci(zero_reg, csr, imm); }
+
+ void frcsr(Register rd) { csrrs(rd, csr_fcsr, zero_reg); }
+ void fscsr(Register rd, Register rs) { csrrw(rd, csr_fcsr, rs); }
+ void fscsr(Register rs) { csrrw(zero_reg, csr_fcsr, rs); }
+
+ void frrm(Register rd) { csrrs(rd, csr_frm, zero_reg); }
+ void fsrm(Register rd, Register rs) { csrrw(rd, csr_frm, rs); }
+ void fsrm(Register rs) { csrrw(zero_reg, csr_frm, rs); }
+
+ void frflags(Register rd) { csrrs(rd, csr_fflags, zero_reg); }
+ void fsflags(Register rd, Register rs) { csrrw(rd, csr_fflags, rs); }
+ void fsflags(Register rs) { csrrw(zero_reg, csr_fflags, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_extension_Extension_riscv_zicsr_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc b/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc
new file mode 100644
index 0000000000..ec8080b0cb
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-zifencei.h"
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVZifencei::fence_i() {
+ GenInstrI(0b001, MISC_MEM, ToRegister(0), ToRegister(0), 0);
+}
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zifencei.h b/js/src/jit/riscv64/extension/extension-riscv-zifencei.h
new file mode 100644
index 0000000000..a245320ec4
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zifencei.h
@@ -0,0 +1,20 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_zifencei_h_
+#define jit_riscv64_extension_Extension_riscv_zifencei_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVZifencei : public AssemblerRiscvBase {
+ public:
+ void fence_i();
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_extension_Extension_riscv_zifencei_h_