Diffstat (limited to 'js/src/jit/riscv64/extension/base-riscv-i.cc')
-rw-r--r--  js/src/jit/riscv64/extension/base-riscv-i.cc  351
1 file changed, 351 insertions, 0 deletions
diff --git a/js/src/jit/riscv64/extension/base-riscv-i.cc b/js/src/jit/riscv64/extension/base-riscv-i.cc
new file mode 100644
index 0000000000..2ee8877eb1
--- /dev/null
+++ b/js/src/jit/riscv64/extension/base-riscv-i.cc
@@ -0,0 +1,351 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "jit/riscv64/extension/base-riscv-i.h"
+#include "jit/riscv64/constant/Constant-riscv64.h"
+#include "jit/riscv64/Assembler-riscv64.h"
+#include "jit/riscv64/Architecture-riscv64.h"
+namespace js {
+namespace jit {
+
+void AssemblerRISCVI::lui(Register rd, int32_t imm20) {
+ GenInstrU(LUI, rd, imm20);
+}
+
+void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
+ GenInstrU(AUIPC, rd, imm20);
+}
+
+// Jumps
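+// jal takes a J-type, 21-bit signed offset (bit 0 implicitly zero, +/-1 MiB
+// range); jalr adds a sign-extended 12-bit I-type immediate to rs1 and clears
+// bit 0 of the result.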
+
+void AssemblerRISCVI::jal(Register rd, int32_t imm21) {
+ GenInstrJ(JAL, rd, imm21);
+}
+
+void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, JALR, rd, rs1, imm12);
+}
+
+// Branches
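+// All conditional branches use the B-type encoding: imm13 is a signed,
+// 2-byte-aligned offset (bit 0 implicitly zero), giving a +/-4 KiB range.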
+
+void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
+}
+
+void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
+}
+
+// Loads
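+// Loads are I-type: the effective address is rs1 plus the sign-extended
+// 12-bit immediate.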
+
+void AssemblerRISCVI::lb(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b000, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lh(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b001, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lbu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b100, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::lhu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b101, rd, rs1, imm12);
+}
+
+// Stores
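+// Stores are S-type: the effective address is base plus the sign-extended
+// 12-bit immediate. Note that GenInstrStore_rri takes (base, source), the
+// reverse of the public (source, base) argument order.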
+
+void AssemblerRISCVI::sb(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b000, base, source, imm12);
+}
+
+void AssemblerRISCVI::sh(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b001, base, source, imm12);
+}
+
+void AssemblerRISCVI::sw(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b010, base, source, imm12);
+}
+
+// Arithmetic with immediate
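+// I-type ALU operations with a sign-extended 12-bit immediate. The immediate
+// shifts take a 6-bit shamt (RV64, hence the & 0x3f), with the leading 0/1
+// flag selecting logical vs. arithmetic right shift.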
+
+void AssemblerRISCVI::addi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b000, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slti(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b010, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::sltiu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::xori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b100, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::ori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b110, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::andi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b111, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
+}
+
+void AssemblerRISCVI::srli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+void AssemblerRISCVI::srai(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+// Arithmetic
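+// R-type register-register operations; funct7 0b0100000 selects sub and sra.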
+
+void AssemblerRISCVI::add(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sub(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sll(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::slt(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sltu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::xor_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::srl(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sra(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::or_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
+}
+
+// Memory fences
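+// pred and succ are 4-bit IORW masks (bit 3 = device input, 2 = device
+// output, 1 = memory reads, 0 = memory writes). fence_tso additionally sets
+// fm = 0b1000 with pred = succ = RW.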
+
+void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) {
+ MOZ_ASSERT(is_uint4(pred) && is_uint4(succ));
+ uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
+}
+
+void AssemblerRISCVI::fence_tso() {
+ uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
+}
+
+// Environment call / break
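+// SYSTEM-opcode instructions with funct3 = 0: imm 0 encodes ecall, imm 1
+// encodes ebreak.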
+
+void AssemblerRISCVI::ecall() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 0);
+}
+
+void AssemblerRISCVI::ebreak() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 1);
+}
+
+// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+// instruction (i.e., it should always trap, if your implementation has invalid
+// instruction traps).
+void AssemblerRISCVI::unimp() {
+ GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
+}
+
+bool AssemblerRISCVI::IsBranch(Instr instr) {
+ return (instr & kBaseOpcodeMask) == BRANCH;
+}
+
+bool AssemblerRISCVI::IsJump(Instr instr) {
+ int Op = instr & kBaseOpcodeMask;
+ return Op == JAL || Op == JALR;
+}
+
+bool AssemblerRISCVI::IsNop(Instr instr) { return instr == kNopByte; }
+
+bool AssemblerRISCVI::IsJal(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JAL;
+}
+
+bool AssemblerRISCVI::IsJalr(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JALR;
+}
+
+bool AssemblerRISCVI::IsLui(Instr instr) {
+ return (instr & kBaseOpcodeMask) == LUI;
+}
+bool AssemblerRISCVI::IsAuipc(Instr instr) {
+ return (instr & kBaseOpcodeMask) == AUIPC;
+}
+bool AssemblerRISCVI::IsAddi(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
+}
+bool AssemblerRISCVI::IsOri(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
+}
+bool AssemblerRISCVI::IsSlli(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
+}
+
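+// Reassemble the scattered J-type immediate (imm[20|10:1|11|19:12] in
+// instruction bits 31:12) and sign-extend it from bit 20.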
+int AssemblerRISCVI::JumpOffset(Instr instr) {
+ int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
+ (instr & 0xff000) | ((instr & 0x80000000) >> 11);
+ imm21 = imm21 << 11 >> 11;
+ return imm21;
+}
+
+int AssemblerRISCVI::JalrOffset(Instr instr) {
+ MOZ_ASSERT(IsJalr(instr));
+ int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
+ return imm12;
+}
+
+int AssemblerRISCVI::AuipcOffset(Instr instr) {
+ MOZ_ASSERT(IsAuipc(instr));
+ int32_t imm20 = static_cast<int32_t>(instr & kImm20Mask);
+ return imm20;
+}
+
+bool AssemblerRISCVI::IsLw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LW;
+}
+
+int AssemblerRISCVI::LoadOffset(Instr instr) {
+#if JS_CODEGEN_RISCV64
+ MOZ_ASSERT(IsLd(instr));
+#elif JS_CODEGEN_RISCV32
+ MOZ_ASSERT(IsLw(instr));
+#endif
+ int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
+ return imm12;
+}
+
+#ifdef JS_CODEGEN_RISCV64
+
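+// RV64-only: 64-bit loads/stores (lwu, ld, sd) and the W-suffix operations,
+// which operate on the low 32 bits and sign-extend the result to 64 bits.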
+bool AssemblerRISCVI::IsAddiw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
+}
+
+bool AssemblerRISCVI::IsLd(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
+}
+
+void AssemblerRISCVI::lwu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b110, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::ld(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b011, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::sd(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b011, base, source, imm12);
+}
+
+void AssemblerRISCVI::addiw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
+}
+
+void AssemblerRISCVI::slliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::srliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::sraiw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void AssemblerRISCVI::addw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::subw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sllw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::srlw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void AssemblerRISCVI::sraw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+#endif
+
+int AssemblerRISCVI::BranchOffset(Instr instr) {
+ // | imm[12] | imm[10:5] | rs2   | rs1   | funct3 | imm[4:1|11] | opcode |
+ //      31     30     25   24 20   19 15   14  12   11        7   6    0
+ int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
+ ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
+ imm13 = imm13 << 19 >> 19;
+ return imm13;
+}
+
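+// Combined offset of an auipc paired with an I-type instruction (jalr or a
+// load) whose rs1 is the auipc destination register: the auipc immediate
+// (already positioned in bits 31:12, i.e. imm20 << 12) plus the sign-extended
+// 12-bit immediate of the I-type instruction.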
+int AssemblerRISCVI::BrachlongOffset(Instr auipc, Instr instr_I) {
+ MOZ_ASSERT(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
+ InstructionBase::kIType);
+ MOZ_ASSERT(IsAuipc(auipc));
+ MOZ_ASSERT(((auipc & kRdFieldMask) >> kRdShift) ==
+ ((instr_I & kRs1FieldMask) >> kRs1Shift));
+ int32_t imm_auipc = AuipcOffset(auipc);
+ int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
+ int32_t offset = imm12 + imm_auipc;
+ return offset;
+}
+
+} // namespace jit
+} // namespace js