Diffstat (limited to 'js/src/jit/x64')
-rw-r--r--  js/src/jit/x64/CodeGenerator-x64.cpp   |  2
-rw-r--r--  js/src/jit/x64/Lowering-x64.cpp        |  8
-rw-r--r--  js/src/jit/x64/MacroAssembler-x64.cpp  | 45
3 files changed, 26 insertions(+), 29 deletions(-)
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
index 9e5319842b..86d4bca0e0 100644
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -432,7 +432,7 @@ void CodeGenerator::visitAtomicTypedArrayElementBinop64(
// Add and Sub don't need |fetchTemp| and can save a `mov` when the value and
// output register are equal to each other.
- if (atomicOp == AtomicFetchAddOp || atomicOp == AtomicFetchSubOp) {
+ if (atomicOp == AtomicOp::Add || atomicOp == AtomicOp::Sub) {
fetchTemp = Register64::Invalid();
fetchOut = temp1;
createTemp = temp2.reg;
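
Background for this hunk: `lock xadd` writes the previous memory value back into its source register, so for Add and Sub the value register can serve directly as the output, with no separate fetch temp and no extra `mov`. A minimal standalone illustration using std::atomic (illustrative name, not code from this tree):

    #include <atomic>
    #include <cstdint>

    // fetch_add compiles to a single `lock xaddq` on x64: the register that
    // held the addend is overwritten with the old value of the memory cell.
    uint64_t fetch_add_example(std::atomic<uint64_t>& mem, uint64_t v) {
      return mem.fetch_add(v, std::memory_order_seq_cst);
    }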
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
index 55d83e3f05..9f9b1713c2 100644
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -208,8 +208,8 @@ void LIRGenerator::visitAtomicTypedArrayElementBinop(
//
// For AND/OR/XOR we need to use a CMPXCHG loop with rax as a temp register.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp = !(ins->operation() == AtomicOp::Add ||
+ ins->operation() == AtomicOp::Sub);
LInt64Definition temp1 = tempInt64();
LInt64Definition temp2;
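
The CMPXCHG loop the comment refers to has the same shape as an explicit compare-exchange loop in C++: `lock cmpxchg` implicitly compares against, and on failure reloads, rax, which is why the lowering must reserve rax as a fixed temp. A sketch (std::atomic::fetch_and would produce the same code; the explicit loop just mirrors what the JIT emits):

    #include <atomic>
    #include <cstdint>

    // AND/OR/XOR have no x64 instruction that also returns the old value,
    // so a compare-exchange loop is required.
    uint64_t fetch_and_example(std::atomic<uint64_t>& mem, uint64_t v) {
      uint64_t old = mem.load();
      // On failure, compare_exchange_weak refreshes `old` with the current
      // value of `mem`, just as cmpxchg refreshes rax.
      while (!mem.compare_exchange_weak(old, old & v)) {
      }
      return old;
    }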
@@ -427,8 +427,8 @@ void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
// *mem does not have the expected value, so reloading it at the
// top of the loop would be redundant.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
bool reuseInput = false;
LAllocation value;
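
The "redundant reload" remark rests on cmpxchg's failure behavior: `lock cmpxchg` compares *mem against rax and, when the comparison fails, leaves the current value of *mem in rax, so the loop can branch straight back to the compute step. Illustrative loop shape only (register names are placeholders, not the exact emitted code):

    movq (mem), %rax            ; load once, before the loop
    again:
    movq %rax, %temp
    andq %value, %temp          ; or orq / xorq
    lock cmpxchgq %temp, (mem)
    jnz again                   ; on failure rax already holds *mem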
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
index 5106e7e382..ebc8c91eaa 100644
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -1459,7 +1459,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
Register output) {
// NOTE: the generated code must match the assembly code in gen_fetchop in
// GenerateAtomicOperations.py
- if (op == AtomicFetchAddOp) {
+ if (op == AtomicOp::Add) {
if (value != output) {
masm.movq(value, output);
}
@@ -1468,7 +1468,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
FaultingCodeOffset(masm.currentOffset()));
}
masm.lock_xaddq(output, Operand(mem));
- } else if (op == AtomicFetchSubOp) {
+ } else if (op == AtomicOp::Sub) {
if (value != output) {
masm.movq(value, output);
}
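
The Sub path shown above starts like Add (move the value into the output register); the usual way to complete a fetch-sub with `lock xadd` is to negate the addend first, since x - v = x + (-v). Compilers apply the same lowering to std::atomic (illustrative, not tree code):

    #include <atomic>
    #include <cstdint>

    // fetch_sub lowers to `negq` + `lock xaddq` on x64.
    uint64_t fetch_sub_example(std::atomic<uint64_t>& mem, uint64_t v) {
      return mem.fetch_sub(v, std::memory_order_seq_cst);
    }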
@@ -1492,13 +1492,13 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.bind(&again);
masm.movq(rax, temp);
switch (op) {
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.andq(value, temp);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.orq(value, temp);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xorq(value, temp);
break;
default:
@@ -1532,19 +1532,19 @@ static void AtomicEffectOp64(MacroAssembler& masm,
FaultingCodeOffset(masm.currentOffset()));
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addq(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subq(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andq(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orq(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorq(value, Operand(mem));
break;
default:
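
AtomicEffectOp64 can use the plain `lock`-prefixed memory forms because the old value is discarded: `lock addq/subq/andq/orq/xorq` update memory atomically without producing the previous value, so neither xadd nor a CMPXCHG loop is needed. GCC and Clang apply the same strength reduction when an atomic RMW result goes unused (illustrative):

    #include <atomic>
    #include <cstdint>

    // Result unused, so this compiles to a bare `lock orq` on x64.
    void effect_or_example(std::atomic<uint64_t>& mem, uint64_t v) {
      mem.fetch_or(v);
    }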
@@ -1558,8 +1558,8 @@ void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
AtomicEffectOp64(*this, &access, op, value.reg, mem);
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const Address& mem, Register64 expected,
+void MacroAssembler::compareExchange64(Synchronization, const Address& mem,
+ Register64 expected,
Register64 replacement,
Register64 output) {
// NOTE: the generated code must match the assembly code in gen_cmpxchg in
@@ -1571,8 +1571,7 @@ void MacroAssembler::compareExchange64(const Synchronization&,
lock_cmpxchgq(replacement.reg, Operand(mem));
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const BaseIndex& mem,
+void MacroAssembler::compareExchange64(Synchronization, const BaseIndex& mem,
Register64 expected,
Register64 replacement,
Register64 output) {
@@ -1583,9 +1582,8 @@ void MacroAssembler::compareExchange64(const Synchronization&,
lock_cmpxchgq(replacement.reg, Operand(mem));
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const Address& mem,
+ Register64 value, Register64 output) {
// NOTE: the generated code must match the assembly code in gen_exchange in
// GenerateAtomicOperations.py
if (value != output) {
@@ -1594,33 +1592,32 @@ void MacroAssembler::atomicExchange64(const Synchronization&,
xchgq(output.reg, Operand(mem));
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const BaseIndex& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const BaseIndex& mem,
+ Register64 value, Register64 output) {
if (value != output) {
movq(value.reg, output.reg);
}
xchgq(output.reg, Operand(mem));
}
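
A detail worth knowing when reading these hunks: `xchg` with a memory operand is implicitly locked on x86, so the bare `xchgq(output.reg, Operand(mem))` is already a sequentially consistent atomic swap; no `lock` prefix is required. Standalone equivalent (illustrative):

    #include <atomic>
    #include <cstdint>

    // std::atomic exchange compiles to a bare `xchgq reg, (mem)` on x64.
    uint64_t exchange_example(std::atomic<uint64_t>& mem, uint64_t v) {
      return mem.exchange(v);
    }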
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem) {
AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem) {
AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}
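
The signature changes in this file drop `const Synchronization&` in favor of passing `Synchronization` by value. For a small, trivially copyable type this is the cheaper calling convention: the value travels in a register, whereas a const reference forces the caller to materialize the object in memory and pass its address. A hypothetical stand-in (the real layout of Synchronization is not shown in this diff):

    #include <cstdint>

    // Hypothetical stand-in for a small, trivially copyable sync descriptor.
    struct SynchronizationLike {
      uint32_t barrierBits = 0;
    };

    // By value: passed in a register under the SysV x86-64 ABI.
    uint32_t byValue(SynchronizationLike sync) { return sync.barrierBits; }

    // By const reference: the caller spills the object and passes a pointer.
    uint32_t byRef(const SynchronizationLike& sync) { return sync.barrierBits; }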