summary | refs | log | tree | commit | diff | stats
path: root/js/src/jit/x86-shared
diff options
context:
space:
mode:
Diffstat (limited to 'js/src/jit/x86-shared')
-rw-r--r--  js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp    |   6
-rw-r--r--  js/src/jit/x86-shared/Lowering-x86-shared.cpp         |   4
-rw-r--r--  js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h |   3
-rw-r--r--  js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp   | 171
-rw-r--r--  js/src/jit/x86-shared/MacroAssembler-x86-shared.h     |   9
5 files changed, 86 insertions(+), 107 deletions(-)
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
index 434a54669b..692e884f06 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -2078,12 +2078,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
}
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- if (ins->type() & MembarStoreLoad) {
- masm.storeLoadFence();
- }
-}
-
void CodeGeneratorX86Shared::visitOutOfLineWasmTruncateCheck(
OutOfLineWasmTruncateCheck* ool) {
FloatRegister input = ool->input();
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
index bd5986298d..6d90f2f96b 100644
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -732,8 +732,8 @@ void LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(
// There are optimization opportunities:
// - better register allocation in the x86 8-bit case, Bug #1077036.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
bool fixedOutput = true;
bool reuseInput = false;
LDefinition tempDef1 = LDefinition::BogusTemp();
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
index 9848086e7f..8ce3f68224 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -1246,7 +1246,8 @@ template FaultingCodeOffset MacroAssembler::storeFloat32(FloatRegister src,
void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
if (barrier & MembarStoreLoad) {
- storeLoadFence();
+ // This implementation follows Linux.
+ masm.mfence();
}
}
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
index e474f83530..1520321260 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -1143,13 +1143,13 @@ static void CompareExchange(MacroAssembler& masm,
ExtendTo32(masm, type, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization,
const Address& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization,
const BaseIndex& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
@@ -1201,13 +1201,13 @@ static void AtomicExchange(MacroAssembler& masm,
ExtendTo32(masm, type, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization,
const Address& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, mem, value, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization,
const BaseIndex& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, mem, value, output);
@@ -1227,7 +1227,7 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
static void SetupValue(MacroAssembler& masm, AtomicOp op, Imm32 src,
Register output) {
- if (op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Sub) {
masm.movl(Imm32(-src.value), output);
} else {
masm.movl(src, output);
@@ -1239,7 +1239,7 @@ static void SetupValue(MacroAssembler& masm, AtomicOp op, Register src,
if (src != output) {
masm.movl(src, output);
}
- if (op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Sub) {
masm.negl(output);
}
}
@@ -1269,15 +1269,14 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.j(MacroAssembler::NonZero, &again); \
} while (0)
- MOZ_ASSERT_IF(op == AtomicFetchAddOp || op == AtomicFetchSubOp,
- temp == InvalidReg);
+ MOZ_ASSERT_IF(op == AtomicOp::Add || op == AtomicOp::Sub, temp == InvalidReg);
switch (Scalar::byteSize(arrayType)) {
case 1:
CheckBytereg(output);
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
CheckBytereg(value); // But not for the bitwise ops
SetupValue(masm, op, value, output);
if (access) {
@@ -1286,17 +1285,17 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddb(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, andl,
lock_cmpxchgb);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, orl,
lock_cmpxchgb);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, xorl,
lock_cmpxchgb);
@@ -1307,8 +1306,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
break;
case 2:
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
SetupValue(masm, op, value, output);
if (access) {
masm.append(*access, wasm::TrapMachineInsn::Atomic,
@@ -1316,15 +1315,15 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddw(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, andl,
lock_cmpxchgw);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, orl,
lock_cmpxchgw);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, xorl,
lock_cmpxchgw);
break;
@@ -1334,8 +1333,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
break;
case 4:
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
SetupValue(masm, op, value, output);
if (access) {
masm.append(*access, wasm::TrapMachineInsn::Atomic,
@@ -1343,15 +1342,15 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddl(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, andl,
lock_cmpxchgl);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, orl,
lock_cmpxchgl);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, xorl,
lock_cmpxchgl);
break;
@@ -1367,30 +1366,29 @@ static void AtomicFetchOp(MacroAssembler& masm,
#undef ATOMIC_BITOP_BODY
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const Address& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const Address& mem,
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value, const Address& mem,
Register temp, Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
@@ -1436,19 +1434,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
switch (Scalar::byteSize(arrayType)) {
case 1:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addb(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subb(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andb(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orb(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorb(value, Operand(mem));
break;
default:
@@ -1457,19 +1455,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
break;
case 2:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addw(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subw(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andw(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orw(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorw(value, Operand(mem));
break;
default:
@@ -1478,19 +1476,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
break;
case 4:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addl(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subl(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andl(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orl(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorl(value, Operand(mem));
break;
default:
@@ -1535,7 +1533,7 @@ void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval, Register temp,
AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -1547,15 +1545,14 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register temp,
AnyRegister output) {
@@ -1564,9 +1561,8 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register temp,
- AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
@@ -1576,14 +1572,14 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register temp,
+ AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
@@ -1591,9 +1587,9 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -1603,7 +1599,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -1611,39 +1607,36 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const Address& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const Address& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value,
+ const Address& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const BaseIndex& mem,
Register temp) {
MOZ_ASSERT(temp == InvalidReg);
@@ -1652,9 +1645,9 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Imm32 value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Imm32 value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -1664,7 +1657,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -1672,7 +1665,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
index dd1ae53537..21af90e90d 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -161,15 +161,6 @@ class MacroAssemblerX86Shared : public Assembler {
void atomic_inc32(const Operand& addr) { lock_incl(addr); }
void atomic_dec32(const Operand& addr) { lock_decl(addr); }
- void storeLoadFence() {
- // This implementation follows Linux.
- if (HasSSE2()) {
- masm.mfence();
- } else {
- lock_addl(Imm32(0), Operand(Address(esp, 0)));
- }
- }
-
void branch16(Condition cond, Register lhs, Register rhs, Label* label) {
cmpw(rhs, lhs);
j(cond, label);