Diffstat (limited to 'js/src/jit/riscv64/extension')
-rw-r--r--  js/src/jit/riscv64/extension/base-assembler-riscv.cc      517
-rw-r--r--  js/src/jit/riscv64/extension/base-assembler-riscv.h       219
-rw-r--r--  js/src/jit/riscv64/extension/base-riscv-i.cc              351
-rw-r--r--  js/src/jit/riscv64/extension/base-riscv-i.h               273
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-a.cc         123
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-a.h           46
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-c.cc         275
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-c.h           77
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-d.cc         167
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-d.h           68
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-f.cc         158
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-f.h           66
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-m.cc          68
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-m.h           37
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-v.cc         891
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-v.h          484
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-zicsr.cc      44
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-zicsr.h       57
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-zifencei.cc   17
-rw-r--r--  js/src/jit/riscv64/extension/extension-riscv-zifencei.h    20
20 files changed, 3958 insertions, 0 deletions
diff --git a/js/src/jit/riscv64/extension/base-assembler-riscv.cc b/js/src/jit/riscv64/extension/base-assembler-riscv.cc
new file mode 100644
index 0000000000..a64cc818b3
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-assembler-riscv.cc
@@ -0,0 +1,517 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+
+namespace js {
+namespace jit {
+
+int ToNumber(Register reg) {
+  MOZ_ASSERT(reg.code() < Registers::Total);
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // ra
+ 2, // sp
+ 3, // gp
+ 4, // tp
+ 5, // t0
+ 6, // t1
+ 7, // t2
+ 8, // s0/fp
+ 9, // s1
+ 10, // a0
+ 11, // a1
+ 12, // a2
+ 13, // a3
+ 14, // a4
+ 15, // a5
+ 16, // a6
+ 17, // a7
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // s8
+ 25, // s9
+ 26, // s10
+ 27, // s11
+ 28, // t3
+ 29, // t4
+ 30, // t5
+ 31, // t6
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(uint32_t num) {
+  MOZ_ASSERT(num < Registers::Total);
+ const Register kRegisters[] = {
+ zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5,
+ a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6};
+ return kRegisters[num];
+}
+
+// ----- Top-level instruction formats match those in the ISA manual
+// (R, I, S, B, U, J) and in LLVM's RISCVInstrFormats.td.
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd, Register rs1,
+ Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ FPURegister rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ Register rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, Register rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR(uint8_t funct7, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ FPURegister rs1, FPURegister rs2) {
+ MOZ_ASSERT(is_uint7(funct7) && is_uint3(funct3));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
+ Register rd, Register rs1, Register rs2,
+ Register rs3, FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint2(funct2) && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrR4(uint8_t funct2, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1,
+ FPURegister rs2, FPURegister rs3,
+ FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint2(funct2) && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrRAtomic(uint8_t funct5, bool aq, bool rl,
+ uint8_t funct3, Register rd,
+ Register rs1, Register rs2) {
+ MOZ_ASSERT(is_uint5(funct5) && is_uint3(funct3));
+ Instr instr = AMO | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (rl << kRlShift) | (aq << kAqShift) | (funct5 << kFunct5Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrRFrm(uint8_t funct7, BaseOpcode opcode,
+ Register rd, Register rs1, Register rs2,
+ FPURoundingMode frm) {
+ MOZ_ASSERT(is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrI(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1,
+ int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrIShift(bool arithshift, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ Register rs1, uint8_t shamt) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrIShiftW(bool arithshift, uint8_t funct3,
+ BaseOpcode opcode, Register rd,
+ Register rs1, uint8_t shamt) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtWShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, Register rs2, int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrS(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, FPURegister rs2,
+ int16_t imm12) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrB(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, Register rs2, int16_t imm13) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int13(imm13) && ((imm13 & 1) == 0));
+ Instr instr = opcode | ((imm13 & 0x800) >> 4) | // bit 11
+ ((imm13 & 0x1e) << 7) | // bits 4-1
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm13 & 0x7e0) << 20) | // bits 10-5
+ ((imm13 & 0x1000) << 19); // bit 12
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrU(BaseOpcode opcode, Register rd,
+ int32_t imm20) {
+ MOZ_ASSERT((is_int20(imm20) || is_uint20(imm20)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (imm20 << kImm20Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrJ(BaseOpcode opcode, Register rd,
+ int32_t imm21) {
+ MOZ_ASSERT(is_int21(imm21) && ((imm21 & 1) == 0));
+ Instr instr = opcode | (rd.code() << kRdShift) |
+ (imm21 & 0xff000) | // bits 19-12
+ ((imm21 & 0x800) << 9) | // bit 11
+ ((imm21 & 0x7fe) << 20) | // bits 10-1
+ ((imm21 & 0x100000) << 11); // bit 20
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCR(uint8_t funct4, BaseOpcode opcode,
+ Register rd, Register rs2) {
+ MOZ_ASSERT(is_uint4(funct4));
+ ShortInstr instr = opcode | (rs2.code() << kRvcRs2Shift) |
+ (rd.code() << kRvcRdShift) | (funct4 << kRvcFunct4Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCA(uint8_t funct6, BaseOpcode opcode,
+ Register rd, uint8_t funct, Register rs2) {
+ MOZ_ASSERT(is_uint6(funct6) && is_uint2(funct));
+ ShortInstr instr = opcode | ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((rd.code() & 0x7) << kRvcRs1sShift) |
+ (funct6 << kRvcFunct6Shift) | (funct << kRvcFunct2Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCI(uint8_t funct3, BaseOpcode opcode,
+ Register rd, int8_t imm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_int6(imm6));
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((imm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
+ Register rd, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIU(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCIW(uint8_t funct3, BaseOpcode opcode,
+ Register rd, uint8_t uimm8) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
+ Register rs2, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCSS(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rs2, uint8_t uimm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCL(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1,
+ uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+void AssemblerRiscvBase::GenInstrCJ(uint8_t funct3, BaseOpcode opcode,
+ uint16_t uint11) {
+ MOZ_ASSERT(is_uint11(uint11));
+ ShortInstr instr = opcode | (funct3 << kRvcFunct3Shift) | (uint11 << 2);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
+ Register rs2, Register rs1, uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCS(uint8_t funct3, BaseOpcode opcode,
+ FPURegister rs2, Register rs1,
+ uint8_t uimm5) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCB(uint8_t funct3, BaseOpcode opcode,
+ Register rs1, uint8_t uimm8) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8 & 0x1f) << 2) | ((uimm8 & 0xe0) << 5) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void AssemblerRiscvBase::GenInstrCBA(uint8_t funct3, uint8_t funct2,
+ BaseOpcode opcode, Register rs1,
+ int8_t imm6) {
+ MOZ_ASSERT(is_uint3(funct3) && is_uint2(funct2) && is_int6(imm6));
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift) | (funct2 << 10);
+ emit(instr);
+}
+// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
+
+void AssemblerRiscvBase::GenInstrBranchCC_rri(uint8_t funct3, Register rs1,
+ Register rs2, int16_t imm13) {
+ GenInstrB(funct3, BRANCH, rs1, rs2, imm13);
+}
+
+void AssemblerRiscvBase::GenInstrLoad_ri(uint8_t funct3, Register rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, LOAD, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrStore_rri(uint8_t funct3, Register rs1,
+ Register rs2, int16_t imm12) {
+ GenInstrS(funct3, STORE, rs1, rs2, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrALU_ri(uint8_t funct3, Register rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, OP_IMM, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrShift_ri(bool arithshift, uint8_t funct3,
+ Register rd, Register rs1,
+ uint8_t shamt) {
+ MOZ_ASSERT(is_uint6(shamt));
+ GenInstrI(funct3, OP_IMM, rd, rs1, (arithshift << 10) | shamt);
+}
+
+void AssemblerRiscvBase::GenInstrALU_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrCSR_ir(uint8_t funct3, Register rd,
+ ControlStatusReg csr, Register rs1) {
+ GenInstrI(funct3, SYSTEM, rd, rs1, csr);
+}
+
+void AssemblerRiscvBase::GenInstrCSR_ii(uint8_t funct3, Register rd,
+ ControlStatusReg csr, uint8_t imm5) {
+ GenInstrI(funct3, SYSTEM, rd, ToRegister(imm5), csr);
+}
+
+void AssemblerRiscvBase::GenInstrShiftW_ri(bool arithshift, uint8_t funct3,
+ Register rd, Register rs1,
+ uint8_t shamt) {
+ GenInstrIShiftW(arithshift, funct3, OP_IMM_32, rd, rs1, shamt);
+}
+
+void AssemblerRiscvBase::GenInstrALUW_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_32, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrPriv(uint8_t funct7, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, 0b000, SYSTEM, ToRegister(0UL), rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd,
+ Register rs1, int16_t imm12) {
+ GenInstrI(funct3, LOAD_FP, rd, rs1, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrStoreFP_rri(uint8_t funct3, Register rs1,
+ FPURegister rs2, int16_t imm12) {
+ GenInstrS(funct3, STORE_FP, rs1, rs2, imm12);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, Register rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ FPURegister rd, FPURegister rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, FPURegister rs1,
+ Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void AssemblerRiscvBase::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3,
+ Register rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+} // namespace jit
+} // namespace js
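
A quick standalone sanity check on the R-type packing above (not part of the
patch): it reproduces GenInstrR's field order with the shift amounts from the
ISA manual, assuming the kRdShift/kFunct3Shift/kRs1Shift/kRs2Shift/kFunct7Shift
constants in Constant-riscv64.h equal 7, 12, 15, 20, and 25.

#include <cassert>
#include <cstdint>

int main() {
  // add a0, a1, a2: funct7=0000000, rs2=x12, rs1=x11, funct3=000,
  // rd=x10, opcode=OP (0b0110011) -- the same field order GenInstrR emits.
  const uint32_t opcode = 0b0110011;
  const uint32_t rd = 10, rs1 = 11, rs2 = 12, funct3 = 0, funct7 = 0;
  uint32_t instr = opcode | (rd << 7) | (funct3 << 12) | (rs1 << 15) |
                   (rs2 << 20) | (funct7 << 25);
  assert(instr == 0x00C58533);  // what `add a0, a1, a2` assembles to
  return 0;
}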
diff --git a/js/src/jit/riscv64/extension/base-assembler-riscv.h b/js/src/jit/riscv64/extension/base-assembler-riscv.h
new file mode 100644
index 0000000000..cb3083d365
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-assembler-riscv.h
@@ -0,0 +1,219 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#ifndef jit_riscv64_extension_Base_assembler_riscv_h
+#define jit_riscv64_extension_Base_assembler_riscv_h
+
+#include <memory>
+#include <set>
+#include <stdio.h>
+
+#include "jit/Label.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Register-riscv64.h"
+
+#define xlen (uint8_t(sizeof(void*) * 8))
+
+#define kBitsPerByte 8UL
+// Check number width.
+inline constexpr bool is_intn(int64_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline constexpr bool is_uintn(int64_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+#undef kBitsPerByte
+// clang-format off
+#define INT_1_TO_63_LIST(V) \
+ V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) V(9) V(10) \
+ V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) V(20) \
+ V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) V(30) \
+ V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+ V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) V(50) \
+ V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
+ V(61) V(62) V(63)
+// clang-format on
+
+#define DECLARE_IS_INT_N(N) \
+ inline constexpr bool is_int##N(int64_t x) { return is_intn(x, N); }
+
+#define DECLARE_IS_UINT_N(N) \
+ template <class T> \
+ inline constexpr bool is_uint##N(T x) { \
+ return is_uintn(x, N); \
+ }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+
+#undef DECLARE_IS_INT_N
+#undef INT_1_TO_63_LIST
+
+namespace js {
+namespace jit {
+
+typedef FloatRegister FPURegister;
+#define zero_reg zero
+
+#define DEBUG_PRINTF(...) \
+ if (FLAG_riscv_debug) { \
+ std::printf(__VA_ARGS__); \
+ }
+
+int ToNumber(Register reg);
+Register ToRegister(uint32_t num);
+
+class AssemblerRiscvBase {
+ protected:
+ virtual int32_t branch_offset_helper(Label* L, OffsetSize bits) = 0;
+
+ virtual void emit(Instr x) = 0;
+ virtual void emit(ShortInstr x) = 0;
+ virtual void emit(uint64_t x) = 0;
+ virtual uint32_t currentOffset() = 0;
+ // Instruction generation.
+
+ // ----- Top-level instruction formats match those in the ISA manual
+ // (R, I, S, B, U, J). These match the formats defined in LLVM's
+ // RISCVInstrFormats.td.
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
+ FPURegister rd, FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrR4(uint8_t funct2, BaseOpcode opcode, Register rd, Register rs1,
+ Register rs2, Register rs3, FPURoundingMode frm);
+ void GenInstrR4(uint8_t funct2, BaseOpcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2, FPURegister rs3,
+ FPURoundingMode frm);
+ void GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, uint8_t funct3,
+ Register rd, Register rs1, Register rs2);
+ void GenInstrRFrm(uint8_t funct7, BaseOpcode opcode, Register rd,
+ Register rs1, Register rs2, FPURoundingMode frm);
+ void GenInstrI(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrI(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ Register rs1, int16_t imm12);
+ void GenInstrIShift(bool arithshift, uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrIShiftW(bool arithshift, uint8_t funct3, BaseOpcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1,
+ FPURegister rs2, int16_t imm12);
+  void GenInstrB(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
+                 int16_t imm13);
+ void GenInstrU(BaseOpcode opcode, Register rd, int32_t imm20);
+  void GenInstrJ(BaseOpcode opcode, Register rd, int32_t imm21);
+ void GenInstrCR(uint8_t funct4, BaseOpcode opcode, Register rd, Register rs2);
+ void GenInstrCA(uint8_t funct6, BaseOpcode opcode, Register rd, uint8_t funct,
+ Register rs2);
+ void GenInstrCI(uint8_t funct3, BaseOpcode opcode, Register rd, int8_t imm6);
+ void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, Register rd,
+ uint8_t uimm6);
+ void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ uint8_t uimm6);
+ void GenInstrCIW(uint8_t funct3, BaseOpcode opcode, Register rd,
+ uint8_t uimm8);
+ void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
+ uint8_t uimm6);
+ void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, Register rs2,
+ uint8_t uimm6);
+ void GenInstrCL(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCL(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
+ Register rs1, uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, BaseOpcode opcode, Register rs2, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
+ Register rs1, uint8_t uimm5);
+ void GenInstrCJ(uint8_t funct3, BaseOpcode opcode, uint16_t uint11);
+ void GenInstrCB(uint8_t funct3, BaseOpcode opcode, Register rs1,
+ uint8_t uimm8);
+ void GenInstrCBA(uint8_t funct3, uint8_t funct2, BaseOpcode opcode,
+ Register rs1, int8_t imm6);
+
+ // ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
+  void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
+                            int16_t imm13);
+ void GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, int16_t imm12);
+ void GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, Register rs1,
+ Register rs2);
+ void GenInstrCSR_ir(uint8_t funct3, Register rd, ControlStatusReg csr,
+ Register rs1);
+ void GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr,
+                      uint8_t imm5);
+ void GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrPriv(uint8_t funct7, Register rs1, Register rs2);
+ void GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStoreFP_rri(uint8_t funct3, Register rs1, FPURegister rs2,
+ int16_t imm12);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ Register rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, FPURegister rs2);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_riscv64_extension_Base_assembler_riscv_h
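
The is_intN/is_uintN helpers generated above back every immediate-range
MOZ_ASSERT in the encoders. A minimal sketch of the kind of decision they
drive (fits_int12 is an illustrative stand-in for the header's is_int12,
which this file generates via INT_1_TO_63_LIST):

#include <cassert>
#include <cstdint>

// Same range test the header's is_intn performs for n = 12.
constexpr bool fits_int12(int64_t x) { return -2048 <= x && x < 2048; }

int main() {
  assert(fits_int12(2047));   // encodable directly as an I-type imm12
  assert(!fits_int12(2048));  // out of range: needs a lui+addi style expansion
  assert(fits_int12(-2048));  // the asymmetric low end is still in range
  return 0;
}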
diff --git a/js/src/jit/riscv64/extension/base-riscv-i.cc b/js/src/jit/riscv64/extension/base-riscv-i.cc
new file mode 100644
index 0000000000..2ee8877eb1
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-riscv-i.cc
@@ -0,0 +1,351 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/base-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVI::lui(Register rd, int32_t imm20) {
+ GenInstrU(LUI, rd, imm20);
+}
+
+void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
+ GenInstrU(AUIPC, rd, imm20);
+}
+
+// Jumps
+
+void AssemblerRISCVI::jal(Register rd, int32_t imm21) {
+ GenInstrJ(JAL, rd, imm21);
+}
+
+void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, JALR, rd, rs1, imm12);
+}
+
+// Branches
+
+void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
+}
+
+// Loads
+
+void AssemblerRISCVI::lb(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b000, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lh(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b001, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lbu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b100, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lhu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b101, rd, rs1, imm12);
+}
+
+// Stores
+
+void AssemblerRISCVI::sb(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b000, base, source, imm12);
+}
+
+void AssemblerRISCVI::sh(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b001, base, source, imm12);
+}
+
+void AssemblerRISCVI::sw(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b010, base, source, imm12);
+}
+
+// Arithmetic with immediate
+
+void AssemblerRISCVI::addi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b000, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slti(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::sltiu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::xori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b100, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::ori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b110, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::andi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b111, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
+}
+
+void AssemblerRISCVI::srli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+void AssemblerRISCVI::srai(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+// Arithmetic
+
+void AssemblerRISCVI::add(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sub(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sll(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::slt(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sltu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::xor_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::srl(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sra(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::or_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
+}
+
+// Memory fences
+
+void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) {
+ MOZ_ASSERT(is_uint4(pred) && is_uint4(succ));
+ uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
+}
+
+void AssemblerRISCVI::fence_tso() {
+ uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
+}
+
+// Environment call / break
+
+void AssemblerRISCVI::ecall() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 0);
+}
+
+void AssemblerRISCVI::ebreak() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 1);
+}
+
+// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+// instruction (i.e., it should always trap, if your implementation has invalid
+// instruction traps).
+void AssemblerRISCVI::unimp() {
+ GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
+}
+
+bool AssemblerRISCVI::IsBranch(Instr instr) {
+ return (instr & kBaseOpcodeMask) == BRANCH;
+}
+
+bool AssemblerRISCVI::IsJump(Instr instr) {
+ int Op = instr & kBaseOpcodeMask;
+ return Op == JAL || Op == JALR;
+}
+
+bool AssemblerRISCVI::IsNop(Instr instr) { return instr == kNopByte; }
+
+bool AssemblerRISCVI::IsJal(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JAL;
+}
+
+bool AssemblerRISCVI::IsJalr(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JALR;
+}
+
+bool AssemblerRISCVI::IsLui(Instr instr) {
+ return (instr & kBaseOpcodeMask) == LUI;
+}
+bool AssemblerRISCVI::IsAuipc(Instr instr) {
+ return (instr & kBaseOpcodeMask) == AUIPC;
+}
+bool AssemblerRISCVI::IsAddi(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
+}
+bool AssemblerRISCVI::IsOri(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
+}
+bool AssemblerRISCVI::IsSlli(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
+}
+
+int AssemblerRISCVI::JumpOffset(Instr instr) {
+ int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
+ (instr & 0xff000) | ((instr & 0x80000000) >> 11);
+ imm21 = imm21 << 11 >> 11;
+ return imm21;
+}
+
+int AssemblerRISCVI::JalrOffset(Instr instr) {
+ MOZ_ASSERT(IsJalr(instr));
+ int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
+ return imm12;
+}
+
+int AssemblerRISCVI::AuipcOffset(Instr instr) {
+ MOZ_ASSERT(IsAuipc(instr));
+ int32_t imm20 = static_cast<int32_t>(instr & kImm20Mask);
+ return imm20;
+}
+
+bool AssemblerRISCVI::IsLw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LW;
+}
+
+int AssemblerRISCVI::LoadOffset(Instr instr) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(IsLd(instr));
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(IsLw(instr));
+#endif
+ int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
+ return imm12;
+}
+
+#ifdef JS_CODEGEN_RISCV64
+
+bool AssemblerRISCVI::IsAddiw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
+}
+
+bool AssemblerRISCVI::IsLd(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
+}
+
+void AssemblerRISCVI::lwu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b110, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::ld(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::sd(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b011, base, source, imm12);
+}
+
+void AssemblerRISCVI::addiw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::srliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::sraiw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::addw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::subw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sllw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::srlw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sraw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+#endif
+
+int AssemblerRISCVI::BranchOffset(Instr instr) {
+  // | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
+  //    31       30:25      24:20 19:15  14:12     11:8|7        6:0
+ int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
+ ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
+ imm13 = imm13 << 19 >> 19;
+ return imm13;
+}
+
+int AssemblerRISCVI::BrachlongOffset(Instr auipc, Instr instr_I) {
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
+ InstructionBase::kIType);
+ MOZ_ASSERT(IsAuipc(auipc));
+ MOZ_ASSERT(((auipc & kRdFieldMask) >> kRdShift) ==
+ ((instr_I & kRs1FieldMask) >> kRs1Shift));
+ int32_t imm_auipc = AuipcOffset(auipc);
+ int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
+ int32_t offset = imm12 + imm_auipc;
+ return offset;
+}
+
+} // namespace jit
+} // namespace js
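
The B-type immediate is the most scrambled of the formats, so a round-trip
test is the quickest way to see that GenInstrB's encode shuffle and
AssemblerRISCVI::BranchOffset's decode shuffle agree. A standalone sketch
(same masks as the patch, portable sign-extension instead of shifts):

#include <cassert>
#include <cstdint>

// Immediate fields only, packed exactly as GenInstrB does.
uint32_t EncodeBImm(int32_t imm13) {
  uint32_t u = static_cast<uint32_t>(imm13);
  return ((u & 0x800) >> 4) | ((u & 0x1e) << 7) | ((u & 0x7e0) << 20) |
         ((u & 0x1000) << 19);
}

// Extracted exactly as BranchOffset does.
int32_t DecodeBImm(uint32_t instr) {
  int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
                  ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
  return (imm13 & 0x1000) ? imm13 - 0x2000 : imm13;  // sign-extend bit 12
}

int main() {
  for (int32_t imm = -4096; imm < 4096; imm += 2) {  // imm13 range, bit 0 clear
    assert(DecodeBImm(EncodeBImm(imm)) == imm);
  }
  return 0;
}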
diff --git a/js/src/jit/riscv64/extension/base-riscv-i.h b/js/src/jit/riscv64/extension/base-riscv-i.h
new file mode 100644
index 0000000000..cca342c960
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-riscv-i.h
@@ -0,0 +1,273 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Base_riscv_i_h_
+#define jit_riscv64_extension_Base_riscv_i_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+namespace js {
+namespace jit {
+
+class AssemblerRISCVI : public AssemblerRiscvBase {
+ public:
+ void lui(Register rd, int32_t imm20);
+ void auipc(Register rd, int32_t imm20);
+
+ // Jumps
+  void jal(Register rd, int32_t imm21);
+ void jalr(Register rd, Register rs1, int16_t imm12);
+
+ // Branches
+  void beq(Register rs1, Register rs2, int16_t imm13);
+  void bne(Register rs1, Register rs2, int16_t imm13);
+  void blt(Register rs1, Register rs2, int16_t imm13);
+  void bge(Register rs1, Register rs2, int16_t imm13);
+  void bltu(Register rs1, Register rs2, int16_t imm13);
+  void bgeu(Register rs1, Register rs2, int16_t imm13);
+ // Loads
+ void lb(Register rd, Register rs1, int16_t imm12);
+ void lh(Register rd, Register rs1, int16_t imm12);
+ void lw(Register rd, Register rs1, int16_t imm12);
+ void lbu(Register rd, Register rs1, int16_t imm12);
+ void lhu(Register rd, Register rs1, int16_t imm12);
+
+ // Stores
+ void sb(Register source, Register base, int16_t imm12);
+ void sh(Register source, Register base, int16_t imm12);
+ void sw(Register source, Register base, int16_t imm12);
+
+ // Arithmetic with immediate
+ void addi(Register rd, Register rs1, int16_t imm12);
+ void slti(Register rd, Register rs1, int16_t imm12);
+ void sltiu(Register rd, Register rs1, int16_t imm12);
+ void xori(Register rd, Register rs1, int16_t imm12);
+ void ori(Register rd, Register rs1, int16_t imm12);
+ void andi(Register rd, Register rs1, int16_t imm12);
+ void slli(Register rd, Register rs1, uint8_t shamt);
+ void srli(Register rd, Register rs1, uint8_t shamt);
+ void srai(Register rd, Register rs1, uint8_t shamt);
+
+ // Arithmetic
+ void add(Register rd, Register rs1, Register rs2);
+ void sub(Register rd, Register rs1, Register rs2);
+ void sll(Register rd, Register rs1, Register rs2);
+ void slt(Register rd, Register rs1, Register rs2);
+ void sltu(Register rd, Register rs1, Register rs2);
+ void xor_(Register rd, Register rs1, Register rs2);
+ void srl(Register rd, Register rs1, Register rs2);
+ void sra(Register rd, Register rs1, Register rs2);
+ void or_(Register rd, Register rs1, Register rs2);
+ void and_(Register rd, Register rs1, Register rs2);
+
+  // Other pseudo-instructions that are not part of the RISC-V pseudo-assembly
+ void nor(Register rd, Register rs, Register rt) {
+ or_(rd, rs, rt);
+ not_(rd, rd);
+ }
+
+ // Memory fences
+ void fence(uint8_t pred, uint8_t succ);
+ void fence_tso();
+
+ // Environment call / break
+ void ecall();
+ void ebreak();
+
+ void sync() { fence(0b1111, 0b1111); }
+
+ // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+ // instruction (i.e., it should always trap, if your implementation has
+ // invalid instruction traps).
+ void unimp();
+
+ static int JumpOffset(Instr instr);
+ static int AuipcOffset(Instr instr);
+ static int JalrOffset(Instr instr);
+ static int LoadOffset(Instr instr);
+ static int BranchOffset(Instr instr);
+ static int BrachlongOffset(Instr auipc, Instr instr_I);
+ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t imm = target_pos - pos;
+ MOZ_ASSERT((imm & 1) == 0);
+ MOZ_ASSERT(is_intn(imm, kBranchOffsetBits));
+
+ instr &= ~kBImm12Mask;
+ int32_t imm12 = ((imm & 0x800) >> 4) | // bit 11
+ ((imm & 0x1e) << 7) | // bits 4-1
+ ((imm & 0x7e0) << 20) | // bits 10-5
+ ((imm & 0x1000) << 19); // bit 12
+
+ return instr | (imm12 & kBImm12Mask);
+ }
+
+ static inline Instr SetJalOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ MOZ_ASSERT(IsJal(instr));
+ int32_t imm = target_pos - pos;
+ MOZ_ASSERT((imm & 1) == 0);
+ MOZ_ASSERT(is_intn(imm, kJumpOffsetBits));
+
+ instr &= ~kImm20Mask;
+ int32_t imm20 = (imm & 0xff000) | // bits 19-12
+ ((imm & 0x800) << 9) | // bit 11
+ ((imm & 0x7fe) << 20) | // bits 10-1
+ ((imm & 0x100000) << 11); // bit 20
+
+ return instr | (imm20 & kImm20Mask);
+ }
+
+ static inline Instr SetJalrOffset(int32_t offset, Instr instr) {
+ MOZ_ASSERT(IsJalr(instr));
+ MOZ_ASSERT(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ MOZ_ASSERT(IsJalr(instr | (imm12 & kImm12Mask)));
+ MOZ_ASSERT(JalrOffset(instr | (imm12 & kImm12Mask)) == offset);
+ return instr | (imm12 & kImm12Mask);
+ }
+
+ static inline Instr SetLoadOffset(int32_t offset, Instr instr) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(IsLd(instr));
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(IsLw(instr));
+#endif
+ MOZ_ASSERT(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ return instr | (imm12 & kImm12Mask);
+ }
+
+ static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
+ MOZ_ASSERT(IsAuipc(instr));
+ MOZ_ASSERT(is_int20(offset));
+ instr = (instr & ~kImm31_12Mask) | ((offset & kImm19_0Mask) << 12);
+ return instr;
+ }
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsNop(Instr instr);
+ static bool IsJump(Instr instr);
+ static bool IsJal(Instr instr);
+ static bool IsJalr(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsAuipc(Instr instr);
+ static bool IsAddi(Instr instr);
+ static bool IsOri(Instr instr);
+ static bool IsSlli(Instr instr);
+ static bool IsLw(Instr instr);
+
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset13);
+ }
+ inline int32_t jump_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+
+ // Branches
+ void beq(Register rs1, Register rs2, Label* L) {
+ beq(rs1, rs2, branch_offset(L));
+ }
+ void bne(Register rs1, Register rs2, Label* L) {
+ bne(rs1, rs2, branch_offset(L));
+ }
+ void blt(Register rs1, Register rs2, Label* L) {
+ blt(rs1, rs2, branch_offset(L));
+ }
+ void bge(Register rs1, Register rs2, Label* L) {
+ bge(rs1, rs2, branch_offset(L));
+ }
+ void bltu(Register rs1, Register rs2, Label* L) {
+ bltu(rs1, rs2, branch_offset(L));
+ }
+ void bgeu(Register rs1, Register rs2, Label* L) {
+ bgeu(rs1, rs2, branch_offset(L));
+ }
+
+ void beqz(Register rs, int16_t imm13) { beq(rs, zero_reg, imm13); }
+ void beqz(Register rs1, Label* L) { beqz(rs1, branch_offset(L)); }
+ void bnez(Register rs, int16_t imm13) { bne(rs, zero_reg, imm13); }
+ void bnez(Register rs1, Label* L) { bnez(rs1, branch_offset(L)); }
+ void blez(Register rs, int16_t imm13) { bge(zero_reg, rs, imm13); }
+ void blez(Register rs1, Label* L) { blez(rs1, branch_offset(L)); }
+ void bgez(Register rs, int16_t imm13) { bge(rs, zero_reg, imm13); }
+ void bgez(Register rs1, Label* L) { bgez(rs1, branch_offset(L)); }
+ void bltz(Register rs, int16_t imm13) { blt(rs, zero_reg, imm13); }
+ void bltz(Register rs1, Label* L) { bltz(rs1, branch_offset(L)); }
+ void bgtz(Register rs, int16_t imm13) { blt(zero_reg, rs, imm13); }
+
+ void bgtz(Register rs1, Label* L) { bgtz(rs1, branch_offset(L)); }
+ void bgt(Register rs1, Register rs2, int16_t imm13) { blt(rs2, rs1, imm13); }
+ void bgt(Register rs1, Register rs2, Label* L) {
+ bgt(rs1, rs2, branch_offset(L));
+ }
+ void ble(Register rs1, Register rs2, int16_t imm13) { bge(rs2, rs1, imm13); }
+ void ble(Register rs1, Register rs2, Label* L) {
+ ble(rs1, rs2, branch_offset(L));
+ }
+ void bgtu(Register rs1, Register rs2, int16_t imm13) {
+ bltu(rs2, rs1, imm13);
+ }
+ void bgtu(Register rs1, Register rs2, Label* L) {
+ bgtu(rs1, rs2, branch_offset(L));
+ }
+ void bleu(Register rs1, Register rs2, int16_t imm13) {
+ bgeu(rs2, rs1, imm13);
+ }
+ void bleu(Register rs1, Register rs2, Label* L) {
+ bleu(rs1, rs2, branch_offset(L));
+ }
+
+ void j(int32_t imm21) { jal(zero_reg, imm21); }
+ void j(Label* L) { j(jump_offset(L)); }
+ void b(Label* L) { j(L); }
+ void jal(int32_t imm21) { jal(ra, imm21); }
+ void jal(Label* L) { jal(jump_offset(L)); }
+ void jr(Register rs) { jalr(zero_reg, rs, 0); }
+ void jr(Register rs, int32_t imm12) { jalr(zero_reg, rs, imm12); }
+ void jalr(Register rs, int32_t imm12) { jalr(ra, rs, imm12); }
+ void jalr(Register rs) { jalr(ra, rs, 0); }
+ void call(int32_t offset) {
+ auipc(ra, (offset >> 12) + ((offset & 0x800) >> 11));
+ jalr(ra, ra, offset << 20 >> 20);
+ }
+
+ void mv(Register rd, Register rs) { addi(rd, rs, 0); }
+ void not_(Register rd, Register rs) { xori(rd, rs, -1); }
+ void neg(Register rd, Register rs) { sub(rd, zero_reg, rs); }
+ void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
+ void snez(Register rd, Register rs) { sltu(rd, zero_reg, rs); }
+ void sltz(Register rd, Register rs) { slt(rd, rs, zero_reg); }
+ void sgtz(Register rd, Register rs) { slt(rd, zero_reg, rs); }
+
+#if JS_CODEGEN_RISCV64
+ void lwu(Register rd, Register rs1, int16_t imm12);
+ void ld(Register rd, Register rs1, int16_t imm12);
+ void sd(Register source, Register base, int16_t imm12);
+ void addiw(Register rd, Register rs1, int16_t imm12);
+ void slliw(Register rd, Register rs1, uint8_t shamt);
+ void srliw(Register rd, Register rs1, uint8_t shamt);
+ void sraiw(Register rd, Register rs1, uint8_t shamt);
+ void addw(Register rd, Register rs1, Register rs2);
+ void subw(Register rd, Register rs1, Register rs2);
+ void sllw(Register rd, Register rs1, Register rs2);
+ void srlw(Register rd, Register rs1, Register rs2);
+ void sraw(Register rd, Register rs1, Register rs2);
+ void negw(Register rd, Register rs) { subw(rd, zero_reg, rs); }
+ void sext_w(Register rd, Register rs) { addiw(rd, rs, 0); }
+
+ static bool IsAddiw(Instr instr);
+ static bool IsLd(Instr instr);
+#endif
+};
+
+} // namespace jit
+} // namespace js
+
+#endif  // jit_riscv64_extension_Base_riscv_i_h_
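
One detail in the header worth a second look is call(): auipc contributes
hi20 << 12, while jalr adds a sign-extended low 12 bits, so the high part must
be rounded up whenever bit 11 of the offset is set. A worked standalone sketch
of that split (with a portable sign-extension in place of `<< 20 >> 20`):

#include <cassert>
#include <cstdint>

int main() {
  // 0x1800 has bit 11 set, so lo12 sign-extends to -2048 and hi20 must
  // carry a +1 -- the same correction call() applies above.
  int32_t offset = 0x1800;
  int32_t hi20 = (offset >> 12) + ((offset & 0x800) >> 11);  // 1 + 1 = 2
  int32_t lo12 = ((offset & 0xfff) ^ 0x800) - 0x800;         // -2048
  assert((hi20 << 12) + lo12 == offset);  // auipc part + jalr part
  return 0;
}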
diff --git a/js/src/jit/riscv64/extension/extension-riscv-a.cc b/js/src/jit/riscv64/extension/extension-riscv-a.cc
new file mode 100644
index 0000000000..ead355fc0a
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-a.cc
@@ -0,0 +1,123 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-a.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+// RV32A Standard Extension
+void AssemblerRISCVA::lr_w(bool aq, bool rl, Register rd, Register rs1) {
+ GenInstrRAtomic(0b00010, aq, rl, 0b010, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVA::sc_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00011, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoswap_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00001, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoadd_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoxor_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoand_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoor_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomin_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomax_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amominu_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomaxu_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+// RV64A Standard Extension (in addition to RV32A)
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVA::lr_d(bool aq, bool rl, Register rd, Register rs1) {
+ GenInstrRAtomic(0b00010, aq, rl, 0b011, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVA::sc_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00011, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoswap_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00001, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoadd_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoxor_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoand_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amoor_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomin_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomax_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amominu_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVA::amomaxu_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11100, aq, rl, 0b011, rd, rs1, rs2);
+}
+#endif
+} // namespace jit
+} // namespace js
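
The lr/sc pair encoded above is the building block for the spec's canonical
compare-and-swap retry loop. A hypothetical sketch of how a caller might emit
one with the methods declared here -- the template parameter, the
EmitCompareAndSwapW name, and bind(Label*) are assumptions about the
surrounding assembler, not APIs introduced by this patch:

// Assumes Asm exposes the AssemblerRISCVA/AssemblerRISCVI methods above,
// plus the usual SpiderMonkey-style bind(Label*).
template <typename Asm>
void EmitCompareAndSwapW(Asm& masm, Register addr, Register expect,
                         Register desired, Register scratch) {
  Label retry, done;
  masm.bind(&retry);
  masm.lr_w(/*aq=*/true, /*rl=*/true, scratch, addr);  // scratch = [addr]
  masm.bne(scratch, expect, &done);                    // value changed: bail
  masm.sc_w(/*aq=*/true, /*rl=*/true, scratch, addr, desired);
  masm.bnez(scratch, &retry);  // sc_w writes 0 on success, nonzero on failure
  masm.bind(&done);
}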
diff --git a/js/src/jit/riscv64/extension/extension-riscv-a.h b/js/src/jit/riscv64/extension/extension-riscv-a.h
new file mode 100644
index 0000000000..442a4f5bba
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-a.h
@@ -0,0 +1,46 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file."
+#ifndef jit_riscv64_extension_Extension_riscv_a_h_
+#define jit_riscv64_extension_Extension_riscv_a_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVA : public AssemblerRiscvBase {
+ // RV32A Standard Extension
+ public:
+ void lr_w(bool aq, bool rl, Register rd, Register rs1);
+ void sc_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoadd_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoxor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoand_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomin_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomax_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amominu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomaxu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64A Standard Extension (in addition to RV32A)
+ void lr_d(bool aq, bool rl, Register rd, Register rs1);
+ void sc_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoswap_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoadd_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoxor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoand_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomin_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomax_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amominu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomaxu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+#endif
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_a_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-c.cc b/js/src/jit/riscv64/extension/extension-riscv-c.cc
new file mode 100644
index 0000000000..714753a0e0
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-c.cc
@@ -0,0 +1,275 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-c.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV32C/RV64C Standard Extension
+void AssemblerRISCVC::c_nop() { GenInstrCI(0b000, C1, zero_reg, 0); }
+
+void AssemblerRISCVC::c_addi(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg && imm6 != 0);
+ GenInstrCI(0b000, C1, rd, imm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_addiw(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg);
+ GenInstrCI(0b001, C1, rd, imm6);
+}
+#endif
+
+void AssemblerRISCVC::c_addi16sp(int16_t imm10) {
+ MOZ_ASSERT(is_int10(imm10) && (imm10 & 0xf) == 0);
+ uint8_t uimm6 = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) |
+ ((imm10 & 0x40) >> 3) | ((imm10 & 0x180) >> 6) |
+ ((imm10 & 0x20) >> 5);
+ GenInstrCIU(0b011, C1, sp, uimm6);
+}
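+
+// Note: c_addi16sp's immediate is a multiple of 16; the shifts above scatter
+// it into the CI format as nzimm[9|4|6|8:7|5] (instruction bit 12 and bits
+// 6:2), per the RVC encoding.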
+
+void AssemblerRISCVC::c_addi4spn(Register rd, int16_t uimm10) {
+ MOZ_ASSERT(is_uint10(uimm10) && (uimm10 != 0));
+ uint8_t uimm8 = ((uimm10 & 0x4) >> 1) | ((uimm10 & 0x8) >> 3) |
+ ((uimm10 & 0x30) << 2) | ((uimm10 & 0x3c0) >> 4);
+ GenInstrCIW(0b000, C0, rd, uimm8);
+}
+
+void AssemblerRISCVC::c_li(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg);
+ GenInstrCI(0b010, C1, rd, imm6);
+}
+
+void AssemblerRISCVC::c_lui(Register rd, int8_t imm6) {
+ MOZ_ASSERT(rd != zero_reg && rd != sp && imm6 != 0);
+ GenInstrCI(0b011, C1, rd, imm6);
+}
+
+void AssemblerRISCVC::c_slli(Register rd, uint8_t shamt6) {
+ MOZ_ASSERT(rd != zero_reg && shamt6 != 0);
+ GenInstrCIU(0b000, C2, rd, shamt6);
+}
+
+void AssemblerRISCVC::c_fldsp(FPURegister rd, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b001, C2, rd, uimm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_ldsp(Register rd, uint16_t uimm9) {
+ MOZ_ASSERT(rd != zero_reg && is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b011, C2, rd, uimm6);
+}
+#endif
+
+void AssemblerRISCVC::c_lwsp(Register rd, uint16_t uimm8) {
+ MOZ_ASSERT(rd != zero_reg && is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCIU(0b010, C2, rd, uimm6);
+}
+
+void AssemblerRISCVC::c_jr(Register rs1) {
+ MOZ_ASSERT(rs1 != zero_reg);
+ GenInstrCR(0b1000, C2, rs1, zero_reg);
+}
+
+void AssemblerRISCVC::c_mv(Register rd, Register rs2) {
+ MOZ_ASSERT(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1000, C2, rd, rs2);
+}
+
+void AssemblerRISCVC::c_ebreak() { GenInstrCR(0b1001, C2, zero_reg, zero_reg); }
+
+void AssemblerRISCVC::c_jalr(Register rs1) {
+ MOZ_ASSERT(rs1 != zero_reg);
+ GenInstrCR(0b1001, C2, rs1, zero_reg);
+}
+
+void AssemblerRISCVC::c_add(Register rd, Register rs2) {
+ MOZ_ASSERT(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1001, C2, rd, rs2);
+}
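+
+// In the CR format used above, funct4 0b1000 encodes c.jr/c.mv and 0b1001
+// encodes c.ebreak/c.jalr/c.add; each pair is disambiguated by whether the
+// rs2 field (and, for c.ebreak, rd) is zero.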
+
+// CA Instructions
+void AssemblerRISCVC::c_sub(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b00, rs2);
+}
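+
+// The (code() & 0b11000) == 0b01000 assertions here and below restrict the
+// operands to x8-x15, the only registers reachable through the 3-bit
+// register fields of the compressed CA/CL/CS/CB formats.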
+
+void AssemblerRISCVC::c_xor(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b01, rs2);
+}
+
+void AssemblerRISCVC::c_or(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b10, rs2);
+}
+
+void AssemblerRISCVC::c_and(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b11, rs2);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_subw(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b00, rs2);
+}
+
+void AssemblerRISCVC::c_addw(Register rd, Register rs2) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b01, rs2);
+}
+#endif
+
+void AssemblerRISCVC::c_swsp(Register rs2, uint16_t uimm8) {
+ MOZ_ASSERT(is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCSS(0b110, C2, rs2, uimm6);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_sdsp(Register rs2, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b111, C2, rs2, uimm6);
+}
+#endif
+
+void AssemblerRISCVC::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
+ MOZ_ASSERT(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b101, C2, rs2, uimm6);
+}
+
+// CL Instructions
+
+void AssemblerRISCVC::c_lw(Register rd, Register rs1, uint16_t uimm7) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCL(0b010, C0, rd, rs1, uimm5);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_ld(Register rd, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b011, C0, rd, rs1, uimm5);
+}
+#endif
+
+void AssemblerRISCVC::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b001, C0, rd, rs1, uimm5);
+}
+
+// CS Instructions
+
+void AssemblerRISCVC::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
+ ((uimm7 & 0x3) == 0));
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCS(0b110, C0, rs2, rs1, uimm5);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+void AssemblerRISCVC::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b111, C0, rs2, rs1, uimm5);
+}
+#endif
+
+void AssemblerRISCVC::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
+ MOZ_ASSERT(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
+ ((uimm8 & 0x7) == 0));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b101, C0, rs2, rs1, uimm5);
+}
+
+// CJ Instructions
+
+void AssemblerRISCVC::c_j(int16_t imm12) {
+ MOZ_ASSERT(is_int12(imm12));
+ int16_t uimm11 = ((imm12 & 0x800) >> 1) | ((imm12 & 0x400) >> 4) |
+ ((imm12 & 0x300) >> 1) | ((imm12 & 0x80) >> 3) |
+ ((imm12 & 0x40) >> 1) | ((imm12 & 0x20) >> 5) |
+ ((imm12 & 0x10) << 5) | (imm12 & 0xe);
+ GenInstrCJ(0b101, C1, uimm11);
+}
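+
+// The CJ immediate above is stored permuted as offset[11|4|9:8|10|6|7|3:1|5]
+// in instruction bits 12:2; CJumpOffset() below applies the inverse
+// permutation when decoding.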
+
+// CB Instructions
+
+void AssemblerRISCVC::c_bnez(Register rs1, int16_t imm9) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b111, C1, rs1, uimm8);
+}
+
+void AssemblerRISCVC::c_beqz(Register rs1, int16_t imm9) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b110, C1, rs1, uimm8);
+}
+
+void AssemblerRISCVC::c_srli(Register rs1, int8_t shamt6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b00, C1, rs1, shamt6);
+}
+
+void AssemblerRISCVC::c_srai(Register rs1, int8_t shamt6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
+ GenInstrCBA(0b100, 0b01, C1, rs1, shamt6);
+}
+
+void AssemblerRISCVC::c_andi(Register rs1, int8_t imm6) {
+ MOZ_ASSERT(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
+ GenInstrCBA(0b100, 0b10, C1, rs1, imm6);
+}
+
+bool AssemblerRISCVC::IsCJal(Instr instr) {
+ return (instr & kRvcOpcodeMask) == RO_C_J;
+}
+
+bool AssemblerRISCVC::IsCBranch(Instr instr) {
+ int Op = instr & kRvcOpcodeMask;
+ return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
+}
+
+int AssemblerRISCVC::CJumpOffset(Instr instr) {
+ int32_t imm12 = ((instr & 0x4) << 3) | ((instr & 0x38) >> 2) |
+ ((instr & 0x40) << 1) | ((instr & 0x80) >> 1) |
+ ((instr & 0x100) << 2) | ((instr & 0x600) >> 1) |
+ ((instr & 0x800) >> 7) | ((instr & 0x1000) >> 1);
+ imm12 = imm12 << 20 >> 20;
+ return imm12;
+}
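+
+// The `imm12 << 20 >> 20` idiom above sign-extends the reassembled 12-bit
+// offset: shifting it to the top of the 32-bit int and then arithmetic
+// right-shifting replicates bit 11 through the upper bits.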
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-c.h b/js/src/jit/riscv64/extension/extension-riscv-c.h
new file mode 100644
index 0000000000..655141cb30
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-c.h
@@ -0,0 +1,77 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_c_h_
+#define jit_riscv64_extension_Extension_riscv_c_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVC : public AssemblerRiscvBase {
+  // RV32C/RV64C Standard Extension
+ public:
+ void c_nop();
+ void c_addi(Register rd, int8_t imm6);
+
+ void c_addi16sp(int16_t imm10);
+ void c_addi4spn(Register rd, int16_t uimm10);
+ void c_li(Register rd, int8_t imm6);
+ void c_lui(Register rd, int8_t imm6);
+ void c_slli(Register rd, uint8_t shamt6);
+ void c_lwsp(Register rd, uint16_t uimm8);
+ void c_jr(Register rs1);
+ void c_mv(Register rd, Register rs2);
+ void c_ebreak();
+ void c_jalr(Register rs1);
+ void c_j(int16_t imm12);
+ void c_add(Register rd, Register rs2);
+ void c_sub(Register rd, Register rs2);
+ void c_and(Register rd, Register rs2);
+ void c_xor(Register rd, Register rs2);
+ void c_or(Register rd, Register rs2);
+ void c_swsp(Register rs2, uint16_t uimm8);
+ void c_lw(Register rd, Register rs1, uint16_t uimm7);
+ void c_sw(Register rs2, Register rs1, uint16_t uimm7);
+ void c_bnez(Register rs1, int16_t imm9);
+ void c_beqz(Register rs1, int16_t imm9);
+ void c_srli(Register rs1, int8_t shamt6);
+ void c_srai(Register rs1, int8_t shamt6);
+ void c_andi(Register rs1, int8_t imm6);
+
+ void c_fld(FPURegister rd, Register rs1, uint16_t uimm8);
+ void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8);
+ void c_fldsp(FPURegister rd, uint16_t uimm9);
+ void c_fsdsp(FPURegister rs2, uint16_t uimm9);
+#ifdef JS_CODEGEN_RISCV64
+ void c_ld(Register rd, Register rs1, uint16_t uimm8);
+ void c_sd(Register rs2, Register rs1, uint16_t uimm8);
+ void c_subw(Register rd, Register rs2);
+ void c_addw(Register rd, Register rs2);
+ void c_addiw(Register rd, int8_t imm6);
+ void c_ldsp(Register rd, uint16_t uimm9);
+ void c_sdsp(Register rs2, uint16_t uimm9);
+#endif
+
+ int CJumpOffset(Instr instr);
+
+ static bool IsCBranch(Instr instr);
+ static bool IsCJal(Instr instr);
+
+ inline int16_t cjump_offset(Label* L) {
+ return (int16_t)branch_offset_helper(L, OffsetSize::kOffset11);
+ }
+ inline int32_t cbranch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset9);
+ }
+
+ void c_j(Label* L) { c_j(cjump_offset(L)); }
+ void c_bnez(Register rs1, Label* L) { c_bnez(rs1, cbranch_offset(L)); }
+ void c_beqz(Register rs1, Label* L) { c_beqz(rs1, cbranch_offset(L)); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_c_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-d.cc b/js/src/jit/riscv64/extension/extension-riscv-d.cc
new file mode 100644
index 0000000000..cb728baf12
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-d.cc
@@ -0,0 +1,167 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-d.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV32D Standard Extension
+
+void AssemblerRISCVD::fld(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVD::fsd(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b011, base, source, imm12);
+}
+
+void AssemblerRISCVD::fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b01, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVD::fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000001, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000101, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001001, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001101, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsqrt_d(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0101101, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fsgnj_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsgnjn_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fsgnjx_d(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fcvt_s_d(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1));
+}
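+
+// Here and in the fcvt_* emitters below, the rs2 field is a conversion
+// selector rather than a real register (0b00000 = .w, 0b00001 = .wu,
+// 0b00010 = .l, 0b00011 = .lu; FCVT.S.D uses 0b00001 to name the D source);
+// ToRegister(n) merely places that selector value in the rs2 slot.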
+
+void AssemblerRISCVD::fcvt_d_s(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0100001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::feq_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::flt_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fle_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVD::fclass_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b001, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_w_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_wu_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVD::fcvt_d_w(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_d_wu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1));
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64D Standard Extension (in addition to RV32D)
+
+void AssemblerRISCVD::fcvt_l_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVD::fcvt_lu_d(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVD::fmv_x_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b000, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVD::fcvt_d_l(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVD::fcvt_d_lu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVD::fmv_d_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111001, 0b000, rd, rs1, zero_reg);
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-d.h b/js/src/jit/riscv64/extension/extension-riscv-d.h
new file mode 100644
index 0000000000..8497c0ca63
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-d.h
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_d_h_
+#define jit_riscv64_extension_Extension_riscv_d_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVD : public AssemblerRiscvBase {
+ // RV32D Standard Extension
+ public:
+ void fld(FPURegister rd, Register rs1, int16_t imm12);
+ void fsd(FPURegister source, Register base, int16_t imm12);
+ void fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsqrt_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_s_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void feq_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_d(Register rd, FPURegister rs1);
+ void fcvt_w_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_wu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64D Standard Extension (in addition to RV32D)
+ void fcvt_l_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_lu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fmv_x_d(Register rd, FPURegister rs1);
+ void fcvt_d_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_d_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fmv_d_x(FPURegister rd, Register rs1);
+#endif
+
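+  // The helpers below are the standard sign-injection pseudoinstructions:
+  // fsgnj with rs1 == rs2 copies the value (move), fsgnjx clears the sign
+  // bit (absolute value), and fsgnjn flips it (negate).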
+ void fmv_d(FPURegister rd, FPURegister rs) { fsgnj_d(rd, rs, rs); }
+ void fabs_d(FPURegister rd, FPURegister rs) { fsgnjx_d(rd, rs, rs); }
+ void fneg_d(FPURegister rd, FPURegister rs) { fsgnjn_d(rd, rs, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_d_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-f.cc b/js/src/jit/riscv64/extension/extension-riscv-f.cc
new file mode 100644
index 0000000000..44e1fdc495
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-f.cc
@@ -0,0 +1,158 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-f.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+// RV32F Standard Extension
+
+void AssemblerRISCVF::flw(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVF::fsw(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b010, base, source, imm12);
+}
+
+void AssemblerRISCVF::fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm) {
+ GenInstrR4(0b00, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void AssemblerRISCVF::fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000000, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0000100, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001000, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0001100, frm, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsqrt_s(FPURegister rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b0101100, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fsgnj_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsgnjn_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fsgnjx_s(FPURegister rd, FPURegister rs1,
+ FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fcvt_w_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_wu_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVF::fmv_x_w(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b000, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::feq_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::flt_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fle_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVF::fclass_s(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b001, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_s_w(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, zero_reg);
+}
+
+void AssemblerRISCVF::fcvt_s_wu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(1));
+}
+
+void AssemblerRISCVF::fmv_w_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111000, 0b000, rd, rs1, zero_reg);
+}
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64F Standard Extension (in addition to RV32F)
+
+void AssemblerRISCVF::fcvt_l_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVF::fcvt_lu_s(Register rd, FPURegister rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(3));
+}
+
+void AssemblerRISCVF::fcvt_s_l(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(2));
+}
+
+void AssemblerRISCVF::fcvt_s_lu(FPURegister rd, Register rs1,
+ FPURoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(3));
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-f.h b/js/src/jit/riscv64/extension/extension-riscv-f.h
new file mode 100644
index 0000000000..3ab46ffcf6
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-f.h
@@ -0,0 +1,66 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef jit_riscv64_extension_Extension_riscv_f_h_
+#define jit_riscv64_extension_Extension_riscv_f_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVF : public AssemblerRiscvBase {
+ // RV32F Standard Extension
+ public:
+ void flw(FPURegister rd, Register rs1, int16_t imm12);
+ void fsw(FPURegister source, Register base, int16_t imm12);
+ void fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, FPURoundingMode frm = RNE);
+ void fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURoundingMode frm = RNE);
+ void fsqrt_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_w_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_wu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fmv_x_w(Register rd, FPURegister rs1);
+ void feq_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_s(Register rd, FPURegister rs1);
+ void fcvt_s_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fmv_w_x(FPURegister rd, Register rs1);
+
+#ifdef JS_CODEGEN_RISCV64
+ // RV64F Standard Extension (in addition to RV32F)
+ void fcvt_l_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_lu_s(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+ void fcvt_s_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
+#endif
+
+ void fmv_s(FPURegister rd, FPURegister rs) { fsgnj_s(rd, rs, rs); }
+ void fabs_s(FPURegister rd, FPURegister rs) { fsgnjx_s(rd, rs, rs); }
+ void fneg_s(FPURegister rd, FPURegister rs) { fsgnjn_s(rd, rs, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_f_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-m.cc b/js/src/jit/riscv64/extension/extension-riscv-m.cc
new file mode 100644
index 0000000000..b5fcd6c34c
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-m.cc
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-m.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+// RV32M Standard Extension
+
+void AssemblerRISCVM::mul(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulh(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulhsu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::mulhu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::div(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::rem(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
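+
+// All RV32M instructions above share funct7 = 0b0000001 under OP; funct3
+// steps through mul, mulh, mulhsu, mulhu, div, divu, rem, remu in encoding
+// order (0b000-0b111), so only the second argument varies.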
+
+#ifdef JS_CODEGEN_RISCV64
+// RV64M Standard Extension (in addition to RV32M)
+
+void AssemblerRISCVM::mulw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::divuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVM::remuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
+#endif
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-m.h b/js/src/jit/riscv64/extension/extension-riscv-m.h
new file mode 100644
index 0000000000..7c2c932516
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-m.h
@@ -0,0 +1,37 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_m_h_
+#define jit_riscv64_extension_Extension_riscv_m_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVM : public AssemblerRiscvBase {
+ // RV32M Standard Extension
+ public:
+ void mul(Register rd, Register rs1, Register rs2);
+ void mulh(Register rd, Register rs1, Register rs2);
+ void mulhsu(Register rd, Register rs1, Register rs2);
+ void mulhu(Register rd, Register rs1, Register rs2);
+ void div(Register rd, Register rs1, Register rs2);
+ void divu(Register rd, Register rs1, Register rs2);
+ void rem(Register rd, Register rs1, Register rs2);
+ void remu(Register rd, Register rs1, Register rs2);
+#ifdef JS_CODEGEN_RISCV64
+ // RV64M Standard Extension (in addition to RV32M)
+ void mulw(Register rd, Register rs1, Register rs2);
+ void divw(Register rd, Register rs1, Register rs2);
+ void divuw(Register rd, Register rs1, Register rs2);
+ void remw(Register rd, Register rs1, Register rs2);
+ void remuw(Register rd, Register rs1, Register rs2);
+#endif
+};
+} // namespace jit
+} // namespace js
+#endif  // jit_riscv64_extension_Extension_riscv_m_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-v.cc b/js/src/jit/riscv64/extension/extension-riscv-v.cc
new file mode 100644
index 0000000000..c7241158e0
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-v.cc
@@ -0,0 +1,891 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "jit/riscv64/extension/extension-riscv-v.h"
+
+#ifdef CAN_USE_RVV
+# include "src/codegen/assembler.h"
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/extension/register-riscv.h"
+
+namespace js {
+namespace jit {
+
+// RVV
+
+void AssemblerRISCVV::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vmv_vv(VRegister vd, VRegister vs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_vx(VRegister vd, Register rs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_vi(VRegister vd, uint8_t simm5) {
+ GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmv_xs(Register rd, VRegister vs2) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask);
+}
+
+void AssemblerRISCVV::vmv_sx(VRegister vd, Register rs1) {
+ GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
+}
+
+void AssemblerRISCVV::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
+}
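+
+// vmv.v.* and vmerge.v*m share VMV_FUNCT6: vmv is encoded with vm = 1
+// (NoMask) and vs2 = v0, while vmerge uses vm = 0 (Mask) with a real vs2
+// operand.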
+
+void AssemblerRISCVV::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void AssemblerRISCVV::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void AssemblerRISCVV::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs1);
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
+}
+
+void AssemblerRISCVV::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+  MOZ_ASSERT(vd != vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+ GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
+}
+
+void AssemblerRISCVV::vid_v(VRegister vd, MaskType mask) {
+ GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask);
+}
+
+# define DEFINE_OPIVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFWV(name, funct6) \
+ void AssemblerRISCVV::name##_wv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFRED(name, funct6) \
+ void AssemblerRISCVV::name##_vs(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPIVX(name, funct6) \
+ void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \
+ }
+
+# define DEFINE_OPIVI(name, funct6) \
+ void AssemblerRISCVV::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask) { \
+ GenInstrV(funct6, vd, imm5, vs2, mask); \
+ }
+
+# define DEFINE_OPMVV(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, \
+ VRegister vs1, MaskType mask) { \
+ GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+// void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+//                Register rs1, VRegister vs2, MaskType mask = NoMask);
+# define DEFINE_OPMVX(name, funct6) \
+ void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVF(name, funct6) \
+ void AssemblerRISCVV::name##_vf(VRegister vd, VRegister vs2, \
+ FPURegister fs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFWF(name, funct6) \
+ void AssemblerRISCVV::name##_wf(VRegister vd, VRegister vs2, \
+ FPURegister fs1, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVV_FMA(name, funct6) \
+ void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs1, \
+ VRegister vs2, MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+# define DEFINE_OPFVF_FMA(name, funct6) \
+ void AssemblerRISCVV::name##_vf(VRegister vd, FPURegister fs1, \
+ VRegister vs2, MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
+// vector integer extension
+# define DEFINE_OPMVV_VIE(name, vs1) \
+ void AssemblerRISCVV::name(VRegister vd, VRegister vs2, MaskType mask) { \
+ GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask); \
+ }
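+
+// For reference, a definition such as DEFINE_OPIVV(vadd, VADD_FUNCT6)
+// expands to
+//   void AssemblerRISCVV::vadd_vv(VRegister vd, VRegister vs2, VRegister vs1,
+//                                 MaskType mask) {
+//     GenInstrV(VADD_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
+//   }
+// so each DEFINE_* line below stamps out one vector-instruction emitter.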
+
+void AssemblerRISCVV::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
+ GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
+}
+
+void AssemblerRISCVV::vfmv_fs(FPURegister fd, VRegister vs2) {
+ GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
+}
+
+void AssemblerRISCVV::vfmv_sf(VRegister vd, FPURegister fs) {
+ GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
+}
+
+DEFINE_OPIVV(vadd, VADD_FUNCT6)
+DEFINE_OPIVX(vadd, VADD_FUNCT6)
+DEFINE_OPIVI(vadd, VADD_FUNCT6)
+DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
+DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+DEFINE_OPIVV(vand, VAND_FUNCT6)
+DEFINE_OPIVX(vand, VAND_FUNCT6)
+DEFINE_OPIVI(vand, VAND_FUNCT6)
+DEFINE_OPIVV(vor, VOR_FUNCT6)
+DEFINE_OPIVX(vor, VOR_FUNCT6)
+DEFINE_OPIVI(vor, VOR_FUNCT6)
+DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+
+DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
+DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
+
+DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
+DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+
+// Vector Widening Floating-Point Add/Subtract Instructions
+DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+// Vector Widening Floating-Point Reduction Instructions
+DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+// Vector Widening Floating-Point Multiply
+DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
+DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
+DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
+// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
+DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
+DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
+DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
+DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+
+// Vector Widening Floating-Point Fused Multiply-Add Instructions
+DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
+// Vector Narrowing Fixed-Point Clip Instructions
+DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
+DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
+DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
+DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+
+// Vector Integer Extension
+DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
+DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
+DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
+DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
+DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
+DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)
+
+# undef DEFINE_OPIVI
+# undef DEFINE_OPIVV
+# undef DEFINE_OPIVX
+# undef DEFINE_OPFVV
+# undef DEFINE_OPFWV
+# undef DEFINE_OPFVF
+# undef DEFINE_OPFWF
+# undef DEFINE_OPFVV_FMA
+# undef DEFINE_OPFVF_FMA
+# undef DEFINE_OPMVV_VIE
+
+void AssemblerRISCVV::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
+ emit(instr);
+}
+
+void AssemblerRISCVV::vsetivli(Register rd, uint8_t uimm, VSew vsew,
+ Vlmul vlmul, TailAgnosticType tail,
+ MaskAgnosticType mask) {
+ MOZ_ASSERT(is_uint5(uimm));
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((uimm & 0x1F) << kRvvUimmShift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
+ emit(instr);
+}
+
+void AssemblerRISCVV::vsetvl(Register rd, Register rs1, Register rs2) {
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
+ emit(instr);
+}
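+
+// Encoding summary: vsetvli has instruction bit 31 = 0 with zimm[10:0] in
+// bits 30:20; vsetivli sets bits 31:30 = 0b11 with a 5-bit uimm and a 10-bit
+// zimm; vsetvl sets bit 31 = 1 and takes the AVL from rs2. GenZimm packs
+// vsew, vlmul and the tail/mask agnostic flags into the zimm field.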
+
+uint8_t vsew_switch(VSew vsew) {
+ uint8_t width;
+ switch (vsew) {
+ case E8:
+ width = 0b000;
+ break;
+ case E16:
+ width = 0b101;
+ break;
+ case E32:
+ width = 0b110;
+ break;
+ default:
+ width = 0b111;
+ break;
+ }
+ return width;
+}
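+
+// vsew_switch() maps the element width to the mem-op width field of vector
+// loads/stores: EEW 8, 16, 32 and 64 encode as 0b000, 0b101, 0b110 and
+// 0b111; the remaining patterns belong to the scalar FP loads/stores.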
+
+// OPIVV OPFVV OPMVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, int8_t vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPMVV OPFVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ Register rd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPFVV
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ FPURegister fd, VRegister vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((fd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPIVX OPMVX
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, Register rs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_IVX || opcode == OP_MVX);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPFVF
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ VRegister vd, FPURegister fs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_FVF);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((fs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPMVX
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, Register rd, Register rs1,
+ VRegister vs2, MaskType mask) {
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPIVI
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
+ VRegister vs2, MaskType mask) {
+ MOZ_ASSERT(is_uint5(imm5) || is_int5(imm5));
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// VL VS
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, uint8_t umop, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, Register rs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// VL VS AMO
+void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// vmv_xs vcpop_m vfirst_m
+void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
+ Register rd, uint8_t vs1, VRegister vs2,
+ MaskType mask) {
+ MOZ_ASSERT(opcode == OP_MVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+void AssemblerRISCVV::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
+}
+void AssemblerRISCVV::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
+}
+void AssemblerRISCVV::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
+}
+
+void AssemblerRISCVV::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
+}
+void AssemblerRISCVV::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
+}
+
+void AssemblerRISCVV::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
+}
+void AssemblerRISCVV::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
+}
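+
+// In the GenInstrV memory-op overloads, mop selects the addressing mode
+// (0b00 unit-stride, 0b01 indexed-unordered, 0b10 strided, 0b11
+// indexed-ordered) and Nf encodes the number of segment fields minus one,
+// which is how the vlseg/vsseg emitters below differ.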
+
+void AssemblerRISCVV::vlseg2(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
+}
+
+void AssemblerRISCVV::vlseg3(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
+}
+
+void AssemblerRISCVV::vlseg4(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
+}
+
+void AssemblerRISCVV::vlseg5(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
+}
+
+void AssemblerRISCVV::vlseg6(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
+}
+
+void AssemblerRISCVV::vlseg7(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
+}
+
+void AssemblerRISCVV::vlseg8(VRegister vd, Register rs1, uint8_t lumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
+}
+void AssemblerRISCVV::vsseg2(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
+}
+void AssemblerRISCVV::vsseg3(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
+}
+void AssemblerRISCVV::vsseg4(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
+}
+void AssemblerRISCVV::vsseg5(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
+}
+void AssemblerRISCVV::vsseg6(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
+}
+void AssemblerRISCVV::vsseg7(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
+}
+void AssemblerRISCVV::vsseg8(VRegister vd, Register rs1, uint8_t sumop,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
+}
+
+void AssemblerRISCVV::vlsseg2(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
+}
+void AssemblerRISCVV::vlsseg3(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
+}
+void AssemblerRISCVV::vlsseg4(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
+}
+void AssemblerRISCVV::vlsseg5(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
+}
+void AssemblerRISCVV::vlsseg6(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
+}
+void AssemblerRISCVV::vlsseg7(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
+}
+void AssemblerRISCVV::vlsseg8(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
+}
+void AssemblerRISCVV::vssseg2(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
+}
+void AssemblerRISCVV::vssseg3(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
+}
+void AssemblerRISCVV::vssseg4(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
+}
+void AssemblerRISCVV::vssseg5(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
+}
+void AssemblerRISCVV::vssseg6(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
+}
+void AssemblerRISCVV::vssseg7(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
+}
+void AssemblerRISCVV::vssseg8(VRegister vd, Register rs1, Register rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
+}
+
+void AssemblerRISCVV::vlxseg2(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
+}
+void AssemblerRISCVV::vlxseg3(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
+}
+void AssemblerRISCVV::vlxseg4(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
+}
+void AssemblerRISCVV::vlxseg5(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
+}
+void AssemblerRISCVV::vlxseg6(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
+}
+void AssemblerRISCVV::vlxseg7(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
+}
+void AssemblerRISCVV::vlxseg8(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
+}
+void AssemblerRISCVV::vsxseg2(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
+}
+void AssemblerRISCVV::vsxseg3(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
+}
+void AssemblerRISCVV::vsxseg4(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
+}
+void AssemblerRISCVV::vsxseg5(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
+}
+void AssemblerRISCVV::vsxseg6(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
+}
+void AssemblerRISCVV::vsxseg7(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
+}
+void AssemblerRISCVV::vsxseg8(VRegister vd, Register rs1, VRegister rs2,
+ VSew vsew, MaskType mask) {
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
+}
+
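+// VWXUNARY0: the vs1 field selects the concrete operation, 0b10000 for
+// vcpop.m and 0b10001 for vfirst.m.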
+void AssemblerRISCVV::vfirst_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10001, vs2, mask);
+}
+
+void AssemblerRISCVV::vcpop_m(Register rd, VRegister vs2, MaskType mask) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10000, vs2, mask);
+}
+
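+// Maps a machine representation to a lane width in bits (sz); the private
+// constructor wraps laneidx modulo the lane count.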
+LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
+ uint8_t laneidx) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
+ break;
+ case MachineRepresentation::kWord16:
+ *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
+ break;
+ case MachineRepresentation::kWord32:
+ *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
+ break;
+ case MachineRepresentation::kWord64:
+ *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace jit
+} // namespace js
+#endif
diff --git a/js/src/jit/riscv64/extension/extension-riscv-v.h b/js/src/jit/riscv64/extension/extension-riscv-v.h
new file mode 100644
index 0000000000..8f04f24c56
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-v.h
@@ -0,0 +1,484 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_v_h_
+#define jit_riscv64_extension_Extension_riscv_v_h_
+#ifdef CAN_USE_RVV
+# include "jit/riscv64/Architecture-riscv64.h"
+# include "jit/riscv64/constant/Constant-riscv64.h"
+# include "jit/riscv64/extension/base-assembler-riscv.h"
+
+namespace js {
+namespace jit {
+
+class AssemblerRISCVV : public AssemblerRiscvBase {
+ public:
+ // RVV
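+ // Composes the vtype immediate for vsetvli/vsetivli: vlmul in bits [2:0],
+ // vsew in bits [5:3], the tail-agnostic bit at 6 and the mask-agnostic
+ // bit at 7.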
+ static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
+ }
+
+ void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+# define SegInstr(OP) \
+ void OP##seg2(ARG); \
+ void OP##seg3(ARG); \
+ void OP##seg4(ARG); \
+ void OP##seg5(ARG); \
+ void OP##seg6(ARG); \
+ void OP##seg7(ARG); \
+ void OP##seg8(ARG);
+
+# define ARG \
+ VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vl) SegInstr(vs)
+# undef ARG
+
+# define ARG \
+ VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vls) SegInstr(vss)
+# undef ARG
+
+# define ARG \
+ VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vsx) SegInstr(vlx)
+# undef ARG
+# undef SegInstr
+
+ // RVV Vector Arithmetic Instructions
+
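+ // Integer move, merge, and scalar move/extract instructions.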
+ void vmv_vv(VRegister vd, VRegister vs1);
+ void vmv_vx(VRegister vd, Register rs1);
+ void vmv_vi(VRegister vd, uint8_t simm5);
+ void vmv_xs(Register rd, VRegister vs2);
+ void vmv_sx(VRegister vd, Register rs1);
+ void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+
+ void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
+ void vfmv_fs(FPURegister fd, VRegister vs2);
+ void vfmv_sf(VRegister vd, FPURegister fs);
+
+ void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask = NoMask);
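+ // vid.v: writes each element's index (0, 1, 2, ...) into vd.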
+ void vid_v(VRegister vd, MaskType mask = Mask);
+
+# define DEFINE_OPIVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPIVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPIVI(name, funct6) \
+ void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFWV(name, funct6) \
+ void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFRED(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVF(name, funct6) \
+ void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFWF(name, funct6) \
+ void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVV_FMA(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPFVF_FMA(name, funct6) \
+ void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
+ MaskType mask = NoMask);
+
+# define DEFINE_OPMVV_VIE(name) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);
+
+ DEFINE_OPIVV(vadd, VADD_FUNCT6)
+ DEFINE_OPIVX(vadd, VADD_FUNCT6)
+ DEFINE_OPIVI(vadd, VADD_FUNCT6)
+ DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+ DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+ DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+ DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+ DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
+ DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVV(vand, VAND_FUNCT6)
+ DEFINE_OPIVX(vand, VAND_FUNCT6)
+ DEFINE_OPIVI(vand, VAND_FUNCT6)
+ DEFINE_OPIVV(vor, VOR_FUNCT6)
+ DEFINE_OPIVX(vor, VOR_FUNCT6)
+ DEFINE_OPIVI(vor, VOR_FUNCT6)
+ DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+ DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+ DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+ DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+ DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+ DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+ DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+ DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+ DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+ DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+ DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+ DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+ DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+ DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+ DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+ DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
+ DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+ DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+ DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
+
+ DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+ DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+
+ // Vector Widening Floating-Point Add/Subtract Instructions
+ DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
+ DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
+ DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
+ DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
+ DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
+
+ // Vector Widening Floating-Point Reduction Instructions
+ DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
+ DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
+
+ // Vector Widening Floating-Point Multiply
+ DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
+ DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
+
+ DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+ DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+ DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+ DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
+ DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
+ DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
+ DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+ DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
+ // Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+ DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
+
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
+ DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+ DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
+
+ // Vector Narrowing Fixed-Point Clip Instructions
+ DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
+ DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
+ DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
+ DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+
+ // Vector Integer Extension
+ DEFINE_OPMVV_VIE(vzext_vf8)
+ DEFINE_OPMVV_VIE(vsext_vf8)
+ DEFINE_OPMVV_VIE(vzext_vf4)
+ DEFINE_OPMVV_VIE(vsext_vf4)
+ DEFINE_OPMVV_VIE(vzext_vf2)
+ DEFINE_OPMVV_VIE(vsext_vf2)
+
+# undef DEFINE_OPIVI
+# undef DEFINE_OPIVV
+# undef DEFINE_OPIVX
+# undef DEFINE_OPMVV
+# undef DEFINE_OPMVX
+# undef DEFINE_OPFVV
+# undef DEFINE_OPFWV
+# undef DEFINE_OPFVF
+# undef DEFINE_OPFWF
+# undef DEFINE_OPFVV_FMA
+# undef DEFINE_OPFVF_FMA
+# undef DEFINE_OPMVV_VIE
+# undef DEFINE_OPFRED
+
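+ // VFUNARY0/VFUNARY1 are single-operand floating-point ops; the concrete
+ // operation is encoded in the vs1 field.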
+# define DEFINE_VFUNARY(name, funct6, vs1) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+ DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V)
+ DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
+ DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
+ DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
+ DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
+ DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
+ DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)
+
+ DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
+ DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
+ DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)
+
+ DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
+ DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
+ DEFINE_VFUNARY(vfrsqrt7_v, VFUNARY1_FUNCT6, VFRSQRT7_V)
+ DEFINE_VFUNARY(vfrec7_v, VFUNARY1_FUNCT6, VFREC7_V)
+# undef DEFINE_VFUNARY
+
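+ // Pseudo-instructions built from the primitives above: vnot via xor with
+ // -1, vneg via reverse-subtract from zero, vfneg/vfabs via sign-injection.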
+ void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vxor_vi(dst, src, -1, mask);
+ }
+
+ void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vrsub_vx(dst, src, zero_reg, mask);
+ }
+
+ void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjn_vv(dst, src, src, mask);
+ }
+ void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjx_vv(dst, src, src, mask);
+ }
+ void vfirst_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
+ void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask);
+
+ protected:
+ void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
+                       TailAgnosticType tail = tu,
+                       MaskAgnosticType mask = mu) {
+   vsetvli(rd, zero_reg, vsew, vlmul, tail, mask);
+ }
+
+ inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+                    MaskAgnosticType mask = mu) {
+   vsetvli(zero_reg, zero_reg, vsew, vlmul, tail, mask);
+ }
+
+ void vsetvl(Register rd, Register rs1, Register rs2);
+
+ // ----------------------------RVV------------------------------------------
+ // vsetvl
+ void GenInstrV(Register rd, Register rs1, Register rs2);
+ // vsetvli
+ void GenInstrV(Register rd, Register rs1, uint32_t zimm);
+ // OPIVV OPFVV OPMVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, int8_t vs1,
+ VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPMVV OPFVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+ // OPFVV
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, FPURegister fd,
+ VRegister vs1, VRegister vs2, MaskType mask = NoMask);
+
+ // OPIVX OPMVX
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask = NoMask);
+ // OPFVF
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
+ FPURegister fs1, VRegister vs2, MaskType mask = NoMask);
+ // OPMVX
+ void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
+ MaskType mask = NoMask);
+ // OPIVI
+ void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
+ MaskType mask = NoMask);
+
+ // VL VS
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // VL VS AMO
+ void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // vmv_xs vcpop_m vfirst_m
+ void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd, uint8_t vs1,
+ VRegister vs2, MaskType mask);
+};
+
+class LoadStoreLaneParams {
+ public:
+ int sz;
+ uint8_t laneidx;
+
+ LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
+
+ private:
+ LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
+ : sz(sz), laneidx(laneidx % lanes) {}
+};
+} // namespace jit
+} // namespace js
+#endif  // CAN_USE_RVV
+#endif  // jit_riscv64_extension_Extension_riscv_v_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc b/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc
new file mode 100644
index 0000000000..7fa87393a3
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zicsr.cc
@@ -0,0 +1,44 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-zicsr.h"
+
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Register-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVZicsr::csrrw(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b001, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrs(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b010, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrc(Register rd, ControlStatusReg csr,
+ Register rs1) {
+ GenInstrCSR_ir(0b011, rd, csr, rs1);
+}
+
+void AssemblerRISCVZicsr::csrrwi(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b101, rd, csr, imm5);
+}
+
+void AssemblerRISCVZicsr::csrrsi(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b110, rd, csr, imm5);
+}
+
+void AssemblerRISCVZicsr::csrrci(Register rd, ControlStatusReg csr,
+ uint8_t imm5) {
+ GenInstrCSR_ii(0b111, rd, csr, imm5);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zicsr.h b/js/src/jit/riscv64/extension/extension-riscv-zicsr.h
new file mode 100644
index 0000000000..e1fba4fa57
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zicsr.h
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_zicsr_h_
+#define jit_riscv64_extension_Extension_riscv_zicsr_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/Register-riscv64.h"
+namespace js {
+namespace jit {
+
+class AssemblerRISCVZicsr : public AssemblerRiscvBase {
+ public:
+ // CSR
+ void csrrw(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrs(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrc(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrci(Register rd, ControlStatusReg csr, uint8_t imm5);
+
+ // Read counter CSRs; the *h variants read the upper 32 bits on RV32.
+ void rdinstret(Register rd) { csrrs(rd, csr_instret, zero_reg); }
+ void rdinstreth(Register rd) { csrrs(rd, csr_instreth, zero_reg); }
+ void rdcycle(Register rd) { csrrs(rd, csr_cycle, zero_reg); }
+ void rdcycleh(Register rd) { csrrs(rd, csr_cycleh, zero_reg); }
+ void rdtime(Register rd) { csrrs(rd, csr_time, zero_reg); }
+ void rdtimeh(Register rd) { csrrs(rd, csr_timeh, zero_reg); }
+
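+ // Standard CSR pseudo-instructions: csrr expands to csrrs with rs1 = x0,
+ // csrw to csrrw with rd = x0, and likewise for the set/clear forms.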
+ void csrr(Register rd, ControlStatusReg csr) { csrrs(rd, csr, zero_reg); }
+ void csrw(ControlStatusReg csr, Register rs) { csrrw(zero_reg, csr, rs); }
+ void csrs(ControlStatusReg csr, Register rs) { csrrs(zero_reg, csr, rs); }
+ void csrc(ControlStatusReg csr, Register rs) { csrrc(zero_reg, csr, rs); }
+
+ void csrwi(ControlStatusReg csr, uint8_t imm) { csrrwi(zero_reg, csr, imm); }
+ void csrsi(ControlStatusReg csr, uint8_t imm) { csrrsi(zero_reg, csr, imm); }
+ void csrci(ControlStatusReg csr, uint8_t imm) { csrrci(zero_reg, csr, imm); }
+
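+ // Floating-point CSR accessors (fcsr, frm, fflags).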
+ void frcsr(Register rd) { csrrs(rd, csr_fcsr, zero_reg); }
+ void fscsr(Register rd, Register rs) { csrrw(rd, csr_fcsr, rs); }
+ void fscsr(Register rs) { csrrw(zero_reg, csr_fcsr, rs); }
+
+ void frrm(Register rd) { csrrs(rd, csr_frm, zero_reg); }
+ void fsrm(Register rd, Register rs) { csrrw(rd, csr_frm, rs); }
+ void fsrm(Register rs) { csrrw(zero_reg, csr_frm, rs); }
+
+ void frflags(Register rd) { csrrs(rd, csr_fflags, zero_reg); }
+ void fsflags(Register rd, Register rs) { csrrw(rd, csr_fflags, rs); }
+ void fsflags(Register rs) { csrrw(zero_reg, csr_fflags, rs); }
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_extension_Extension_riscv_zicsr_h_
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc b/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc
new file mode 100644
index 0000000000..ec8080b0cb
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zifencei.cc
@@ -0,0 +1,17 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/extension-riscv-zifencei.h"
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
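+// FENCE.I: orders subsequent instruction fetches after prior stores on the
+// local hart; encoded as MISC-MEM with funct3 = 0b001.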
+void AssemblerRISCVZifencei::fence_i() {
+ GenInstrI(0b001, MISC_MEM, ToRegister(0), ToRegister(0), 0);
+}
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/riscv64/extension/extension-riscv-zifencei.h b/js/src/jit/riscv64/extension/extension-riscv-zifencei.h
new file mode 100644
index 0000000000..a245320ec4
--- /dev/null
+++ b/js/src/jit/riscv64/extension/extension-riscv-zifencei.h
@@ -0,0 +1,20 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef jit_riscv64_extension_Extension_riscv_zifencei_h_
+#define jit_riscv64_extension_Extension_riscv_zifencei_h_
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+#include "jit/riscv64/extension/base-assembler-riscv.h"
+namespace js {
+namespace jit {
+class AssemblerRISCVZifencei : public AssemblerRiscvBase {
+ public:
+ void fence_i();
+};
+} // namespace jit
+} // namespace js
+#endif // jit_riscv64_extension_Extension_riscv_zifencei_h_