author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:13:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:13:27 +0000
commit     40a355a42d4a9444dc753c04c6608dade2f06a23 (patch)
tree       871fc667d2de662f171103ce5ec067014ef85e61 /js/src/jit
parent     Adding upstream version 124.0.1. (diff)
Adding upstream version 125.0.1. (upstream/125.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/jit')
-rw-r--r--  js/src/jit/AtomicOp.h | 22
-rw-r--r--  js/src/jit/BaselineCacheIRCompiler.cpp | 12
-rw-r--r--  js/src/jit/CacheIR.cpp | 539
-rw-r--r--  js/src/jit/CacheIR.h | 10
-rw-r--r--  js/src/jit/CacheIRCompiler.cpp | 710
-rw-r--r--  js/src/jit/CacheIRCompiler.h | 35
-rw-r--r--  js/src/jit/CacheIRGenerator.h | 7
-rw-r--r--  js/src/jit/CacheIROps.yaml | 115
-rw-r--r--  js/src/jit/CacheIRReader.h | 3
-rw-r--r--  js/src/jit/CacheIRSpewer.cpp | 6
-rw-r--r--  js/src/jit/CacheIRWriter.h | 5
-rw-r--r--  js/src/jit/CodeGenerator.cpp | 183
-rw-r--r--  js/src/jit/Disassemble.cpp | 27
-rw-r--r--  js/src/jit/ExecutableAllocator.h | 3
-rw-r--r--  js/src/jit/GenerateAtomicOperations.py | 10
-rw-r--r--  js/src/jit/GenerateCacheIRFiles.py | 8
-rw-r--r--  js/src/jit/IonAnalysis.cpp | 17
-rw-r--r--  js/src/jit/IonOptimizationLevels.h | 4
-rw-r--r--  js/src/jit/JitFrames.cpp | 105
-rw-r--r--  js/src/jit/JitFrames.h | 2
-rw-r--r--  js/src/jit/JitOptions.cpp | 4
-rw-r--r--  js/src/jit/JitOptions.h | 2
-rw-r--r--  js/src/jit/JitRuntime.h | 16
-rw-r--r--  js/src/jit/JitScript.cpp | 11
-rw-r--r--  js/src/jit/JitSpewer.cpp | 3
-rw-r--r--  js/src/jit/JitSpewer.h | 2
-rw-r--r--  js/src/jit/JitZone.h | 3
-rw-r--r--  js/src/jit/LIROps.yaml | 59
-rw-r--r--  js/src/jit/Lowering.cpp | 139
-rw-r--r--  js/src/jit/MIR.cpp | 85
-rw-r--r--  js/src/jit/MIR.h | 213
-rw-r--r--  js/src/jit/MIROps.yaml | 84
-rw-r--r--  js/src/jit/MacroAssembler-inl.h | 63
-rw-r--r--  js/src/jit/MacroAssembler.cpp | 255
-rw-r--r--  js/src/jit/MacroAssembler.h | 247
-rw-r--r--  js/src/jit/PcScriptCache.h | 88
-rw-r--r--  js/src/jit/RangeAnalysis.cpp | 19
-rw-r--r--  js/src/jit/Recover.cpp | 11
-rw-r--r--  js/src/jit/Registers.h | 1
-rw-r--r--  js/src/jit/VMFunctions.cpp | 82
-rw-r--r--  js/src/jit/VMFunctions.h | 43
-rw-r--r--  js/src/jit/WarpBuilderShared.cpp | 9
-rw-r--r--  js/src/jit/WarpCacheIRTranspiler.cpp | 506
-rw-r--r--  js/src/jit/arm/CodeGenerator-arm.cpp | 4
-rw-r--r--  js/src/jit/arm/MacroAssembler-arm.cpp | 160
-rw-r--r--  js/src/jit/arm64/CodeGenerator-arm64.cpp | 4
-rw-r--r--  js/src/jit/arm64/MacroAssembler-arm64.cpp | 122
-rw-r--r--  js/src/jit/loong64/Assembler-loong64.cpp | 6
-rw-r--r--  js/src/jit/loong64/Assembler-loong64.h | 11
-rw-r--r--  js/src/jit/loong64/CodeGenerator-loong64.cpp | 4
-rw-r--r--  js/src/jit/loong64/MacroAssembler-loong64.cpp | 178
-rw-r--r--  js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp | 4
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp | 136
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.cpp | 10
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.cpp | 42
-rw-r--r--  js/src/jit/riscv64/CodeGenerator-riscv64.cpp | 4
-rw-r--r--  js/src/jit/riscv64/MacroAssembler-riscv64.cpp | 178
-rw-r--r--  js/src/jit/shared/Assembler-shared.h | 4
-rw-r--r--  js/src/jit/shared/LIR-shared.h | 11
-rw-r--r--  js/src/jit/wasm32/CodeGenerator-wasm32.cpp | 1
-rw-r--r--  js/src/jit/x64/CodeGenerator-x64.cpp | 2
-rw-r--r--  js/src/jit/x64/Lowering-x64.cpp | 8
-rw-r--r--  js/src/jit/x64/MacroAssembler-x64.cpp | 45
-rw-r--r--  js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp | 6
-rw-r--r--  js/src/jit/x86-shared/Lowering-x86-shared.cpp | 4
-rw-r--r--  js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h | 3
-rw-r--r--  js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp | 171
-rw-r--r--  js/src/jit/x86-shared/MacroAssembler-x86-shared.h | 9
-rw-r--r--  js/src/jit/x86/Lowering-x86.cpp | 4
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86.cpp | 39
70 files changed, 3348 insertions(+), 1590 deletions(-)
diff --git a/js/src/jit/AtomicOp.h b/js/src/jit/AtomicOp.h
index 90edb631cb..ed9a6dd74c 100644
--- a/js/src/jit/AtomicOp.h
+++ b/js/src/jit/AtomicOp.h
@@ -7,17 +7,19 @@
#ifndef jit_AtomicOp_h
#define jit_AtomicOp_h
+#include <stdint.h>
+
namespace js {
namespace jit {
// Types of atomic operation, shared by MIR and LIR.
-enum AtomicOp {
- AtomicFetchAddOp,
- AtomicFetchSubOp,
- AtomicFetchAndOp,
- AtomicFetchOrOp,
- AtomicFetchXorOp
+enum class AtomicOp {
+ Add,
+ Sub,
+ And,
+ Or,
+ Xor,
};
// Memory barrier types, shared by MIR and LIR.
@@ -26,7 +28,7 @@ enum AtomicOp {
// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS)
// but there's been no reason to use it yet.
-enum MemoryBarrierBits {
+enum MemoryBarrierBits : uint8_t {
MembarLoadLoad = 1,
MembarLoadStore = 2,
MembarStoreStore = 4,
@@ -41,16 +43,16 @@ enum MemoryBarrierBits {
static inline constexpr MemoryBarrierBits operator|(MemoryBarrierBits a,
MemoryBarrierBits b) {
- return MemoryBarrierBits(int(a) | int(b));
+ return MemoryBarrierBits(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
}
static inline constexpr MemoryBarrierBits operator&(MemoryBarrierBits a,
MemoryBarrierBits b) {
- return MemoryBarrierBits(int(a) & int(b));
+ return MemoryBarrierBits(static_cast<uint8_t>(a) & static_cast<uint8_t>(b));
}
static inline constexpr MemoryBarrierBits operator~(MemoryBarrierBits a) {
- return MemoryBarrierBits(~int(a));
+ return MemoryBarrierBits(~static_cast<uint8_t>(a));
}
// Standard barrier bits for a full barrier.
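
Note: the AtomicOp.h hunks above turn the unscoped AtomicFetch*Op enumerators into a scoped enum and give MemoryBarrierBits an explicit uint8_t underlying type. As a rough standalone sketch (not part of the patch; the call site below is hypothetical), this is how the new definitions behave for callers:

#include <stdint.h>

namespace js::jit {

// Scoped operation names, as introduced by the patch.
enum class AtomicOp { Add, Sub, And, Or, Xor };

enum MemoryBarrierBits : uint8_t {
  MembarLoadLoad = 1,
  MembarLoadStore = 2,
  MembarStoreStore = 4,
};

static inline constexpr MemoryBarrierBits operator|(MemoryBarrierBits a,
                                                    MemoryBarrierBits b) {
  return MemoryBarrierBits(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
}

// Hypothetical call site: users now write AtomicOp::Add instead of the old
// AtomicFetchAddOp, and the value no longer converts implicitly to int.
const char* AtomicOpName(AtomicOp op) {
  switch (op) {
    case AtomicOp::Add: return "add";
    case AtomicOp::Sub: return "sub";
    case AtomicOp::And: return "and";
    case AtomicOp::Or:  return "or";
    case AtomicOp::Xor: return "xor";
  }
  return "unknown";
}

// Barrier bits still combine via the constexpr operators, now over uint8_t.
constexpr MemoryBarrierBits kExampleBarrier = MembarLoadStore | MembarStoreStore;

}  // namespace js::jit
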
diff --git a/js/src/jit/BaselineCacheIRCompiler.cpp b/js/src/jit/BaselineCacheIRCompiler.cpp
index 171771ed51..92490ef8b8 100644
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -2170,8 +2170,13 @@ void ShapeListObject::trace(JSTracer* trc, JSObject* obj) {
}
bool ShapeListObject::traceWeak(JSTracer* trc) {
+ uint32_t length = getDenseInitializedLength();
+ if (length == 0) {
+ return false; // Object may be uninitialized.
+ }
+
const HeapSlot* src = elements_;
- const HeapSlot* end = src + getDenseInitializedLength();
+ const HeapSlot* end = src + length;
HeapSlot* dst = elements_;
while (src != end) {
Shape* shape = static_cast<Shape*>(src->toPrivate());
@@ -2184,7 +2189,7 @@ bool ShapeListObject::traceWeak(JSTracer* trc) {
}
MOZ_ASSERT(dst <= end);
- size_t length = dst - elements_;
+ length = dst - elements_;
setDenseInitializedLength(length);
return length != 0;
@@ -3446,6 +3451,9 @@ void BaselineCacheIRCompiler::createThis(Register argcReg, Register calleeReg,
// Restore saved registers.
masm.PopRegsInMask(liveNonGCRegs);
+
+ // Restore ICStubReg. The stub might have been moved if CreateThisFromIC
+ // discarded JIT code.
Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
masm.loadPtr(stubAddr, ICStubReg);
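
Note: the ShapeListObject::traceWeak change above bails out early when the dense elements are still empty and reuses |length| for the compacted count. A self-contained illustration of the same compact-in-place pattern, in generic C++ rather than the SpiderMonkey types:

#include <cstddef>
#include <vector>

// Generic illustration of the weak-trace compaction loop: keep surviving
// entries at the front, shrink the list, and report whether anything is still
// alive. Returning false for an empty list mirrors the new early return for a
// possibly-uninitialized object.
template <typename T, typename IsAlive>
bool CompactWeakList(std::vector<T>& elems, IsAlive&& isAlive) {
  if (elems.empty()) {
    return false;  // Object may be uninitialized.
  }
  std::size_t dst = 0;
  for (std::size_t src = 0; src < elems.size(); src++) {
    if (isAlive(elems[src])) {
      elems[dst++] = elems[src];  // Compact live entries towards the front.
    }
  }
  elems.resize(dst);  // setDenseInitializedLength(dst) in the real code.
  return dst != 0;
}

In spirit, the real code does CompactWeakList(shapes, shapeStillTracedWeakly) and uses the boolean to tell the GC whether the whole stub is dead.
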
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
index c2245e38b5..68dbd6bfee 100644
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -520,9 +520,14 @@ enum class NativeGetPropKind {
static NativeGetPropKind IsCacheableGetPropCall(NativeObject* obj,
NativeObject* holder,
- PropertyInfo prop) {
+ PropertyInfo prop,
+ jsbytecode* pc = nullptr) {
MOZ_ASSERT(IsCacheableProtoChain(obj, holder));
+ if (pc && JSOp(*pc) == JSOp::GetBoundName) {
+ return NativeGetPropKind::None;
+ }
+
if (!prop.isAccessorProperty()) {
return NativeGetPropKind::None;
}
@@ -615,7 +620,7 @@ static NativeGetPropKind CanAttachNativeGetProp(JSContext* cx, JSObject* obj,
return NativeGetPropKind::Slot;
}
- return IsCacheableGetPropCall(nobj, *holder, propInfo->ref());
+ return IsCacheableGetPropCall(nobj, *holder, propInfo->ref(), pc);
}
if (!prop.isFound()) {
@@ -1975,6 +1980,46 @@ AttachDecision GetPropIRGenerator::tryAttachProxy(HandleObject obj,
MOZ_CRASH("Unexpected ProxyStubType");
}
+const JSClass* js::jit::ClassFor(GuardClassKind kind) {
+ switch (kind) {
+ case GuardClassKind::Array:
+ return &ArrayObject::class_;
+ case GuardClassKind::PlainObject:
+ return &PlainObject::class_;
+ case GuardClassKind::FixedLengthArrayBuffer:
+ return &FixedLengthArrayBufferObject::class_;
+ case GuardClassKind::ResizableArrayBuffer:
+ return &ResizableArrayBufferObject::class_;
+ case GuardClassKind::FixedLengthSharedArrayBuffer:
+ return &FixedLengthSharedArrayBufferObject::class_;
+ case GuardClassKind::GrowableSharedArrayBuffer:
+ return &GrowableSharedArrayBufferObject::class_;
+ case GuardClassKind::FixedLengthDataView:
+ return &FixedLengthDataViewObject::class_;
+ case GuardClassKind::ResizableDataView:
+ return &ResizableDataViewObject::class_;
+ case GuardClassKind::MappedArguments:
+ return &MappedArgumentsObject::class_;
+ case GuardClassKind::UnmappedArguments:
+ return &UnmappedArgumentsObject::class_;
+ case GuardClassKind::WindowProxy:
+ // Caller needs to handle this case, see
+ // JSRuntime::maybeWindowProxyClass().
+ break;
+ case GuardClassKind::JSFunction:
+ // Caller needs to handle this case. Can be either |js::FunctionClass| or
+ // |js::ExtendedFunctionClass|.
+ break;
+ case GuardClassKind::BoundFunction:
+ return &BoundFunctionObject::class_;
+ case GuardClassKind::Set:
+ return &SetObject::class_;
+ case GuardClassKind::Map:
+ return &MapObject::class_;
+ }
+ MOZ_CRASH("unexpected kind");
+}
+
// Guards the class of an object. Because shape implies class, and a shape guard
// is faster than a class guard, if this is our first time attaching a stub, we
// instead generate a shape guard.
@@ -1983,25 +2028,16 @@ void IRGenerator::emitOptimisticClassGuard(ObjOperandId objId, JSObject* obj,
#ifdef DEBUG
switch (kind) {
case GuardClassKind::Array:
- MOZ_ASSERT(obj->is<ArrayObject>());
- break;
case GuardClassKind::PlainObject:
- MOZ_ASSERT(obj->is<PlainObject>());
- break;
case GuardClassKind::FixedLengthArrayBuffer:
- MOZ_ASSERT(obj->is<FixedLengthArrayBufferObject>());
- break;
+ case GuardClassKind::ResizableArrayBuffer:
case GuardClassKind::FixedLengthSharedArrayBuffer:
- MOZ_ASSERT(obj->is<FixedLengthSharedArrayBufferObject>());
- break;
+ case GuardClassKind::GrowableSharedArrayBuffer:
case GuardClassKind::FixedLengthDataView:
- MOZ_ASSERT(obj->is<FixedLengthDataViewObject>());
- break;
+ case GuardClassKind::ResizableDataView:
case GuardClassKind::Set:
- MOZ_ASSERT(obj->is<SetObject>());
- break;
case GuardClassKind::Map:
- MOZ_ASSERT(obj->is<MapObject>());
+ MOZ_ASSERT(obj->hasClass(ClassFor(kind)));
break;
case GuardClassKind::MappedArguments:
@@ -2077,8 +2113,7 @@ AttachDecision GetPropIRGenerator::tryAttachObjectLength(HandleObject obj,
AttachDecision GetPropIRGenerator::tryAttachTypedArray(HandleObject obj,
ObjOperandId objId,
HandleId id) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
@@ -2120,31 +2155,52 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArray(HandleObject obj,
}
}
- auto* tarr = &obj->as<FixedLengthTypedArrayObject>();
+ auto* tarr = &obj->as<TypedArrayObject>();
maybeEmitIdGuard(id);
// Emit all the normal guards for calling this native, but specialize
// callNativeGetterResult.
EmitCallGetterResultGuards(writer, tarr, holder, id, *prop, objId, mode_);
if (isLength) {
- if (tarr->length() <= INT32_MAX) {
- writer.loadArrayBufferViewLengthInt32Result(objId);
+ size_t length = tarr->length().valueOr(0);
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (length <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objId);
+ }
} else {
- writer.loadArrayBufferViewLengthDoubleResult(objId);
+ if (length <= INT32_MAX) {
+ writer.resizableTypedArrayLengthInt32Result(objId);
+ } else {
+ writer.resizableTypedArrayLengthDoubleResult(objId);
+ }
}
trackAttached("GetProp.TypedArrayLength");
} else if (isByteOffset) {
- if (tarr->byteOffset() <= INT32_MAX) {
+ // byteOffset doesn't need to use different code paths for fixed-length and
+ // resizable TypedArrays.
+ size_t byteOffset = tarr->byteOffset().valueOr(0);
+ if (byteOffset <= INT32_MAX) {
writer.arrayBufferViewByteOffsetInt32Result(objId);
} else {
writer.arrayBufferViewByteOffsetDoubleResult(objId);
}
trackAttached("GetProp.TypedArrayByteOffset");
} else {
- if (tarr->byteLength() <= INT32_MAX) {
- writer.typedArrayByteLengthInt32Result(objId);
+ size_t byteLength = tarr->byteLength().valueOr(0);
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (byteLength <= INT32_MAX) {
+ writer.typedArrayByteLengthInt32Result(objId);
+ } else {
+ writer.typedArrayByteLengthDoubleResult(objId);
+ }
} else {
- writer.typedArrayByteLengthDoubleResult(objId);
+ if (byteLength <= INT32_MAX) {
+ writer.resizableTypedArrayByteLengthInt32Result(objId);
+ } else {
+ writer.resizableTypedArrayByteLengthDoubleResult(objId);
+ }
}
trackAttached("GetProp.TypedArrayByteLength");
}
@@ -2156,11 +2212,10 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArray(HandleObject obj,
AttachDecision GetPropIRGenerator::tryAttachDataView(HandleObject obj,
ObjOperandId objId,
HandleId id) {
- // TODO: Support resizable dataviews. (bug 1842999)
- if (!obj->is<FixedLengthDataViewObject>()) {
+ if (!obj->is<DataViewObject>()) {
return AttachDecision::NoAction;
}
- auto* dv = &obj->as<FixedLengthDataViewObject>();
+ auto* dv = &obj->as<DataViewObject>();
if (mode_ != ICState::Mode::Specialized) {
return AttachDecision::NoAction;
@@ -2181,6 +2236,12 @@ AttachDecision GetPropIRGenerator::tryAttachDataView(HandleObject obj,
return AttachDecision::NoAction;
}
+ // byteOffset and byteLength both throw when the ArrayBuffer is out-of-bounds.
+ if (dv->is<ResizableDataViewObject>() &&
+ dv->as<ResizableDataViewObject>().isOutOfBounds()) {
+ return AttachDecision::NoAction;
+ }
+
NativeObject* holder = nullptr;
Maybe<PropertyInfo> prop;
NativeGetPropKind kind =
@@ -2205,18 +2266,33 @@ AttachDecision GetPropIRGenerator::tryAttachDataView(HandleObject obj,
// callNativeGetterResult.
EmitCallGetterResultGuards(writer, dv, holder, id, *prop, objId, mode_);
writer.guardHasAttachedArrayBuffer(objId);
+ if (dv->is<ResizableDataViewObject>()) {
+ writer.guardResizableArrayBufferViewInBounds(objId);
+ }
if (isByteOffset) {
- if (dv->byteOffset() <= INT32_MAX) {
+ // byteOffset doesn't need to use different code paths for fixed-length and
+ // resizable DataViews.
+ size_t byteOffset = dv->byteOffset().valueOr(0);
+ if (byteOffset <= INT32_MAX) {
writer.arrayBufferViewByteOffsetInt32Result(objId);
} else {
writer.arrayBufferViewByteOffsetDoubleResult(objId);
}
trackAttached("GetProp.DataViewByteOffset");
} else {
- if (dv->byteLength() <= INT32_MAX) {
- writer.loadArrayBufferViewLengthInt32Result(objId);
+ size_t byteLength = dv->byteLength().valueOr(0);
+ if (dv->is<FixedLengthDataViewObject>()) {
+ if (byteLength <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objId);
+ }
} else {
- writer.loadArrayBufferViewLengthDoubleResult(objId);
+ if (byteLength <= INT32_MAX) {
+ writer.resizableDataViewByteLengthInt32Result(objId);
+ } else {
+ writer.resizableDataViewByteLengthDoubleResult(objId);
+ }
}
trackAttached("GetProp.DataViewByteLength");
}
@@ -2232,11 +2308,6 @@ AttachDecision GetPropIRGenerator::tryAttachArrayBufferMaybeShared(
}
auto* buf = &obj->as<ArrayBufferObjectMaybeShared>();
- // TODO: Support resizable buffers. (bug 1842999)
- if (buf->isResizable()) {
- return AttachDecision::NoAction;
- }
-
if (mode_ != ICState::Mode::Specialized) {
return AttachDecision::NoAction;
}
@@ -2273,10 +2344,18 @@ AttachDecision GetPropIRGenerator::tryAttachArrayBufferMaybeShared(
// Emit all the normal guards for calling this native, but specialize
// callNativeGetterResult.
EmitCallGetterResultGuards(writer, buf, holder, id, *prop, objId, mode_);
- if (buf->byteLength() <= INT32_MAX) {
- writer.loadArrayBufferByteLengthInt32Result(objId);
+ if (!buf->is<GrowableSharedArrayBufferObject>()) {
+ if (buf->byteLength() <= INT32_MAX) {
+ writer.loadArrayBufferByteLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferByteLengthDoubleResult(objId);
+ }
} else {
- writer.loadArrayBufferByteLengthDoubleResult(objId);
+ if (buf->byteLength() <= INT32_MAX) {
+ writer.growableSharedArrayBufferByteLengthInt32Result(objId);
+ } else {
+ writer.growableSharedArrayBufferByteLengthDoubleResult(objId);
+ }
}
writer.returnFromIC();
@@ -3044,9 +3123,8 @@ AttachDecision GetPropIRGenerator::tryAttachSparseElement(
// For Uint32Array we let the stub return an Int32 if we have not seen a
// double, to allow better codegen in Warp while avoiding bailout loops.
-static bool ForceDoubleForUint32Array(FixedLengthTypedArrayObject* tarr,
- uint64_t index) {
- MOZ_ASSERT(index < tarr->length());
+static bool ForceDoubleForUint32Array(TypedArrayObject* tarr, uint64_t index) {
+ MOZ_ASSERT(index < tarr->length().valueOr(0));
if (tarr->type() != Scalar::Type::Uint32) {
// Return value is only relevant for Uint32Array.
@@ -3059,10 +3137,27 @@ static bool ForceDoubleForUint32Array(FixedLengthTypedArrayObject* tarr,
return res.isDouble();
}
+static ArrayBufferViewKind ToArrayBufferViewKind(const TypedArrayObject* obj) {
+ if (obj->is<FixedLengthTypedArrayObject>()) {
+ return ArrayBufferViewKind::FixedLength;
+ }
+
+ MOZ_ASSERT(obj->is<ResizableTypedArrayObject>());
+ return ArrayBufferViewKind::Resizable;
+}
+
+static ArrayBufferViewKind ToArrayBufferViewKind(const DataViewObject* obj) {
+ if (obj->is<FixedLengthDataViewObject>()) {
+ return ArrayBufferViewKind::FixedLength;
+ }
+
+ MOZ_ASSERT(obj->is<ResizableDataViewObject>());
+ return ArrayBufferViewKind::Resizable;
+}
+
AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
HandleObject obj, ObjOperandId objId) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
@@ -3070,12 +3165,12 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
return AttachDecision::NoAction;
}
- auto* tarr = &obj->as<FixedLengthTypedArrayObject>();
+ auto* tarr = &obj->as<TypedArrayObject>();
bool handleOOB = false;
int64_t indexInt64;
if (!ValueIsInt64Index(idVal_, &indexInt64) || indexInt64 < 0 ||
- uint64_t(indexInt64) >= tarr->length()) {
+ uint64_t(indexInt64) >= tarr->length().valueOr(0)) {
handleOOB = true;
}
@@ -3092,8 +3187,9 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
ValOperandId keyId = getElemKeyValueId();
IntPtrOperandId intPtrIndexId = guardToIntPtrIndex(idVal_, keyId, handleOOB);
+ auto viewKind = ToArrayBufferViewKind(tarr);
writer.loadTypedArrayElementResult(objId, intPtrIndexId, tarr->type(),
- handleOOB, forceDoubleForUint32);
+ handleOOB, forceDoubleForUint32, viewKind);
writer.returnFromIC();
trackAttached("GetProp.TypedElement");
@@ -3376,7 +3472,7 @@ AttachDecision GetNameIRGenerator::tryAttachGlobalNameGetter(ObjOperandId objId,
GlobalObject* global = &globalLexical->global();
- NativeGetPropKind kind = IsCacheableGetPropCall(global, holder, *prop);
+ NativeGetPropKind kind = IsCacheableGetPropCall(global, holder, *prop, pc_);
if (kind != NativeGetPropKind::NativeGetter &&
kind != NativeGetPropKind::ScriptedGetter) {
return AttachDecision::NoAction;
@@ -3957,11 +4053,19 @@ AttachDecision HasPropIRGenerator::tryAttachNative(NativeObject* obj,
return AttachDecision::Attach;
}
+static void EmitGuardTypedArray(CacheIRWriter& writer, TypedArrayObject* obj,
+ ObjOperandId objId) {
+ if (obj->is<FixedLengthTypedArrayObject>()) {
+ writer.guardIsFixedLengthTypedArray(objId);
+ } else {
+ writer.guardIsResizableTypedArray(objId);
+ }
+}
+
AttachDecision HasPropIRGenerator::tryAttachTypedArray(HandleObject obj,
ObjOperandId objId,
ValOperandId keyId) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
@@ -3970,10 +4074,14 @@ AttachDecision HasPropIRGenerator::tryAttachTypedArray(HandleObject obj,
return AttachDecision::NoAction;
}
- writer.guardIsFixedLengthTypedArray(objId);
+ auto* tarr = &obj->as<TypedArrayObject>();
+ EmitGuardTypedArray(writer, tarr, objId);
+
IntPtrOperandId intPtrIndexId =
guardToIntPtrIndex(idVal_, keyId, /* supportOOB = */ true);
- writer.loadTypedArrayElementExistsResult(objId, intPtrIndexId);
+
+ auto viewKind = ToArrayBufferViewKind(tarr);
+ writer.loadTypedArrayElementExistsResult(objId, intPtrIndexId, viewKind);
writer.returnFromIC();
trackAttached("HasProp.TypedArrayObject");
@@ -4940,15 +5048,14 @@ AttachDecision SetPropIRGenerator::tryAttachAddOrUpdateSparseElement(
AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
HandleObject obj, ObjOperandId objId, ValOperandId rhsId) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!idVal_.isNumber()) {
return AttachDecision::NoAction;
}
- auto* tarr = &obj->as<FixedLengthTypedArrayObject>();
+ auto* tarr = &obj->as<TypedArrayObject>();
Scalar::Type elementType = tarr->type();
// Don't attach if the input type doesn't match the guard added below.
@@ -4959,7 +5066,7 @@ AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
bool handleOOB = false;
int64_t indexInt64;
if (!ValueIsInt64Index(idVal_, &indexInt64) || indexInt64 < 0 ||
- uint64_t(indexInt64) >= tarr->length()) {
+ uint64_t(indexInt64) >= tarr->length().valueOr(0)) {
handleOOB = true;
}
@@ -4980,8 +5087,9 @@ AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
ValOperandId keyId = setElemKeyValueId();
IntPtrOperandId indexId = guardToIntPtrIndex(idVal_, keyId, handleOOB);
+ auto viewKind = ToArrayBufferViewKind(tarr);
writer.storeTypedArrayElement(objId, elementType, indexId, rhsValId,
- handleOOB);
+ handleOOB, viewKind);
writer.returnFromIC();
trackAttached(handleOOB ? "SetTypedElementOOB" : "SetTypedElement");
@@ -6470,9 +6578,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachArrayIsArray() {
AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
Scalar::Type type) {
// Ensure |this| is a DataViewObject.
- // TODO: Support resizable dataviews. (bug 1842999)
- if (!thisval_.isObject() ||
- !thisval_.toObject().is<FixedLengthDataViewObject>()) {
+ if (!thisval_.isObject() || !thisval_.toObject().is<DataViewObject>()) {
return AttachDecision::NoAction;
}
@@ -6488,11 +6594,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
return AttachDecision::NoAction;
}
- auto* dv = &thisval_.toObject().as<FixedLengthDataViewObject>();
+ auto* dv = &thisval_.toObject().as<DataViewObject>();
// Bounds check the offset.
- if (offsetInt64 < 0 ||
- !dv->offsetIsInBounds(Scalar::byteSize(type), offsetInt64)) {
+ size_t byteLength = dv->byteLength().valueOr(0);
+ if (offsetInt64 < 0 || !DataViewObject::offsetIsInBounds(
+ Scalar::byteSize(type), offsetInt64, byteLength)) {
return AttachDecision::NoAction;
}
@@ -6501,7 +6608,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
bool forceDoubleForUint32 = false;
if (type == Scalar::Uint32) {
bool isLittleEndian = argc_ > 1 && args_[1].toBoolean();
- uint32_t res = dv->read<uint32_t>(offsetInt64, isLittleEndian);
+ uint32_t res = dv->read<uint32_t>(offsetInt64, byteLength, isLittleEndian);
forceDoubleForUint32 = res >= INT32_MAX;
}
@@ -6515,8 +6622,14 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
ValOperandId thisValId =
writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
ObjOperandId objId = writer.guardToObject(thisValId);
- emitOptimisticClassGuard(objId, &thisval_.toObject(),
- GuardClassKind::FixedLengthDataView);
+
+ if (dv->is<FixedLengthDataViewObject>()) {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::FixedLengthDataView);
+ } else {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::ResizableDataView);
+ }
// Convert offset to intPtr.
ValOperandId offsetId =
@@ -6533,8 +6646,10 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
boolLittleEndianId = writer.loadBooleanConstant(false);
}
+ auto viewKind = ToArrayBufferViewKind(dv);
writer.loadDataViewValueResult(objId, intPtrOffsetId, boolLittleEndianId,
- type, forceDoubleForUint32);
+ type, forceDoubleForUint32, viewKind);
+
writer.returnFromIC();
trackAttached("DataViewGet");
@@ -6544,9 +6659,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
Scalar::Type type) {
// Ensure |this| is a DataViewObject.
- // TODO: Support resizable dataviews. (bug 1842999)
- if (!thisval_.isObject() ||
- !thisval_.toObject().is<FixedLengthDataViewObject>()) {
+ if (!thisval_.isObject() || !thisval_.toObject().is<DataViewObject>()) {
return AttachDecision::NoAction;
}
@@ -6565,11 +6678,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
return AttachDecision::NoAction;
}
- auto* dv = &thisval_.toObject().as<FixedLengthDataViewObject>();
+ auto* dv = &thisval_.toObject().as<DataViewObject>();
// Bounds check the offset.
- if (offsetInt64 < 0 ||
- !dv->offsetIsInBounds(Scalar::byteSize(type), offsetInt64)) {
+ size_t byteLength = dv->byteLength().valueOr(0);
+ if (offsetInt64 < 0 || !DataViewObject::offsetIsInBounds(
+ Scalar::byteSize(type), offsetInt64, byteLength)) {
return AttachDecision::NoAction;
}
@@ -6583,8 +6697,14 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
ValOperandId thisValId =
writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
ObjOperandId objId = writer.guardToObject(thisValId);
- emitOptimisticClassGuard(objId, &thisval_.toObject(),
- GuardClassKind::FixedLengthDataView);
+
+ if (dv->is<FixedLengthDataViewObject>()) {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::FixedLengthDataView);
+ } else {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::ResizableDataView);
+ }
// Convert offset to intPtr.
ValOperandId offsetId =
@@ -6606,8 +6726,10 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
boolLittleEndianId = writer.loadBooleanConstant(false);
}
+ auto viewKind = ToArrayBufferViewKind(dv);
writer.storeDataViewValueResult(objId, intPtrOffsetId, numericValueId,
- boolLittleEndianId, type);
+ boolLittleEndianId, type, viewKind);
+
writer.returnFromIC();
trackAttached("DataViewSet");
@@ -6949,19 +7071,84 @@ AttachDecision InlinableNativeIRGenerator::tryAttachGuardToClass(
return AttachDecision::Attach;
}
+AttachDecision InlinableNativeIRGenerator::tryAttachGuardToClass(
+ GuardClassKind kind) {
+ // Self-hosted code calls this with an object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Class must match.
+ const JSClass* clasp = ClassFor(kind);
+ if (args_[0].toObject().getClass() != clasp) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an object.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Guard that the object has the correct class.
+ writer.guardClass(objId, kind);
+
+ // Return the object.
+ writer.loadObjectResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GuardToClass");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachGuardToEitherClass(
+ GuardClassKind kind1, GuardClassKind kind2) {
+ MOZ_ASSERT(kind1 != kind2,
+ "prefer tryAttachGuardToClass for the same class case");
+
+ // Self-hosted code calls this with an object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Class must match.
+ const JSClass* clasp1 = ClassFor(kind1);
+ const JSClass* clasp2 = ClassFor(kind2);
+ const JSClass* objClass = args_[0].toObject().getClass();
+ if (objClass != clasp1 && objClass != clasp2) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an object.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Guard that the object has the correct class.
+ writer.guardEitherClass(objId, kind1, kind2);
+
+ // Return the object.
+ writer.loadObjectResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GuardToEitherClass");
+ return AttachDecision::Attach;
+}
+
AttachDecision InlinableNativeIRGenerator::tryAttachGuardToArrayBuffer() {
- // TODO: Support resizable ArrayBuffers (bug 1842999), for now simply
- // pass through to tryAttachGuardToClass which guards on
- // FixedLengthArrayBufferObject.
- return tryAttachGuardToClass(InlinableNative::IntrinsicGuardToArrayBuffer);
+ return tryAttachGuardToEitherClass(GuardClassKind::FixedLengthArrayBuffer,
+ GuardClassKind::ResizableArrayBuffer);
}
AttachDecision InlinableNativeIRGenerator::tryAttachGuardToSharedArrayBuffer() {
- // TODO: Support resizable SharedArrayBuffers (bug 1842999), for now simply
- // pass through to tryAttachGuardToClass which guards on
- // FixedLengthSharedArrayBufferObject.
- return tryAttachGuardToClass(
- InlinableNative::IntrinsicGuardToSharedArrayBuffer);
+ return tryAttachGuardToEitherClass(
+ GuardClassKind::FixedLengthSharedArrayBuffer,
+ GuardClassKind::GrowableSharedArrayBuffer);
}
AttachDecision InlinableNativeIRGenerator::tryAttachHasClass(
@@ -8924,7 +9111,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachReflectGetPrototypeOf() {
return AttachDecision::Attach;
}
-static bool AtomicsMeetsPreconditions(FixedLengthTypedArrayObject* typedArray,
+static bool AtomicsMeetsPreconditions(TypedArrayObject* typedArray,
const Value& index) {
switch (typedArray->type()) {
case Scalar::Int8:
@@ -8954,7 +9141,8 @@ static bool AtomicsMeetsPreconditions(FixedLengthTypedArrayObject* typedArray,
if (!ValueIsInt64Index(index, &indexInt64)) {
return false;
}
- if (indexInt64 < 0 || uint64_t(indexInt64) >= typedArray->length()) {
+ if (indexInt64 < 0 ||
+ uint64_t(indexInt64) >= typedArray->length().valueOr(0)) {
return false;
}
@@ -8971,17 +9159,15 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsCompareExchange() {
return AttachDecision::NoAction;
}
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number), expected, replacement.
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!args_[1].isNumber()) {
return AttachDecision::NoAction;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return AttachDecision::NoAction;
}
@@ -9022,8 +9208,10 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsCompareExchange() {
OperandId numericReplacementId =
emitNumericGuard(replacementId, args_[3], elementType);
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsCompareExchangeResult(objId, intPtrIndexId, numericExpectedId,
- numericReplacementId, typedArray->type());
+ numericReplacementId, typedArray->type(),
+ viewKind);
writer.returnFromIC();
trackAttached("AtomicsCompareExchange");
@@ -9040,17 +9228,15 @@ bool InlinableNativeIRGenerator::canAttachAtomicsReadWriteModify() {
return false;
}
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number), value.
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return false;
}
if (!args_[1].isNumber()) {
return false;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return false;
}
@@ -9064,7 +9250,7 @@ InlinableNativeIRGenerator::AtomicsReadWriteModifyOperands
InlinableNativeIRGenerator::emitAtomicsReadWriteModifyOperands() {
MOZ_ASSERT(canAttachAtomicsReadWriteModify());
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
// Initialize the input operand.
initializeInputOperand();
@@ -9099,10 +9285,11 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsExchange() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsExchangeResult(objId, intPtrIndexId, numericValueId,
- typedArray->type());
+ typedArray->type(), viewKind);
writer.returnFromIC();
trackAttached("AtomicsExchange");
@@ -9117,11 +9304,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsAdd() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsAddResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsAdd");
@@ -9136,11 +9324,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsSub() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsSubResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsSub");
@@ -9155,11 +9344,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsAnd() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsAndResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsAnd");
@@ -9174,11 +9364,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsOr() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsOrResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsOr");
@@ -9193,11 +9384,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsXor() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsXorResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsXor");
@@ -9214,17 +9406,15 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsLoad() {
return AttachDecision::NoAction;
}
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number).
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!args_[1].isNumber()) {
return AttachDecision::NoAction;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return AttachDecision::NoAction;
}
@@ -9245,7 +9435,8 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsLoad() {
IntPtrOperandId intPtrIndexId =
guardToIntPtrIndex(args_[1], indexId, /* supportOOB = */ false);
- writer.atomicsLoadResult(objId, intPtrIndexId, typedArray->type());
+ auto viewKind = ToArrayBufferViewKind(typedArray);
+ writer.atomicsLoadResult(objId, intPtrIndexId, typedArray->type(), viewKind);
writer.returnFromIC();
trackAttached("AtomicsLoad");
@@ -9271,17 +9462,15 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsStore() {
// obviously unused or if the argument is already Int32 and thus requires no
// conversion.
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number), value.
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!args_[1].isNumber()) {
return AttachDecision::NoAction;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return AttachDecision::NoAction;
}
@@ -9323,8 +9512,9 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsStore() {
numericValueId = emitNumericGuard(valueId, args_[2], elementType);
}
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsStoreResult(objId, intPtrIndexId, numericValueId,
- typedArray->type());
+ typedArray->type(), viewKind);
writer.returnFromIC();
trackAttached("AtomicsStore");
@@ -10182,12 +10372,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayByteOffset() {
MOZ_ASSERT(args_[0].isObject());
MOZ_ASSERT(args_[0].toObject().is<TypedArrayObject>());
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
- return AttachDecision::NoAction;
- }
-
- auto* tarr = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* tarr = &args_[0].toObject().as<TypedArrayObject>();
// Initialize the input operand.
initializeInputOperand();
@@ -10196,12 +10381,25 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayByteOffset() {
ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
ObjOperandId objArgId = writer.guardToObject(argId);
- writer.guardIsFixedLengthTypedArray(objArgId);
- if (tarr->byteOffset() <= INT32_MAX) {
- writer.arrayBufferViewByteOffsetInt32Result(objArgId);
+
+ EmitGuardTypedArray(writer, tarr, objArgId);
+
+ size_t byteOffset = tarr->byteOffsetMaybeOutOfBounds();
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (byteOffset <= INT32_MAX) {
+ writer.arrayBufferViewByteOffsetInt32Result(objArgId);
+ } else {
+ writer.arrayBufferViewByteOffsetDoubleResult(objArgId);
+ }
} else {
- writer.arrayBufferViewByteOffsetDoubleResult(objArgId);
+ if (byteOffset <= INT32_MAX) {
+ writer.resizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(objArgId);
+ } else {
+ writer.resizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
+ objArgId);
+ }
}
+
writer.returnFromIC();
trackAttached("IntrinsicTypedArrayByteOffset");
@@ -10229,7 +10427,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayElementSize() {
}
AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
- bool isPossiblyWrapped) {
+ bool isPossiblyWrapped, bool allowOutOfBounds) {
// Self-hosted code calls this with a single, possibly wrapped,
// TypedArrayObject argument.
MOZ_ASSERT(argc_ == 1);
@@ -10242,12 +10440,19 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
MOZ_ASSERT(args_[0].toObject().is<TypedArrayObject>());
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
- return AttachDecision::NoAction;
- }
+ auto* tarr = &args_[0].toObject().as<TypedArrayObject>();
- auto* tarr = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ // Don't optimize when a resizable TypedArray is out-of-bounds and
+ // out-of-bounds isn't allowed.
+ auto length = tarr->length();
+ if (length.isNothing() && !tarr->hasDetachedBuffer()) {
+ MOZ_ASSERT(tarr->is<ResizableTypedArrayObject>());
+ MOZ_ASSERT(tarr->isOutOfBounds());
+
+ if (!allowOutOfBounds) {
+ return AttachDecision::NoAction;
+ }
+ }
// Initialize the input operand.
initializeInputOperand();
@@ -10261,11 +10466,24 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
writer.guardIsNotProxy(objArgId);
}
- writer.guardIsFixedLengthTypedArray(objArgId);
- if (tarr->length() <= INT32_MAX) {
- writer.loadArrayBufferViewLengthInt32Result(objArgId);
+ EmitGuardTypedArray(writer, tarr, objArgId);
+
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (length.valueOr(0) <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objArgId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objArgId);
+ }
} else {
- writer.loadArrayBufferViewLengthDoubleResult(objArgId);
+ if (!allowOutOfBounds) {
+ writer.guardResizableArrayBufferViewInBoundsOrDetached(objArgId);
+ }
+
+ if (length.valueOr(0) <= INT32_MAX) {
+ writer.resizableTypedArrayLengthInt32Result(objArgId);
+ } else {
+ writer.resizableTypedArrayLengthDoubleResult(objArgId);
+ }
}
writer.returnFromIC();
@@ -10273,13 +10491,6 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
return AttachDecision::Attach;
}
-AttachDecision
-InlinableNativeIRGenerator::tryAttachTypedArrayLengthZeroOnOutOfBounds() {
- // We don't yet inline resizable buffers, so this operation is equivalent to
- // the inline code path for tryAttachTypedArrayLength().
- return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false);
-}
-
AttachDecision InlinableNativeIRGenerator::tryAttachArrayBufferByteLength(
bool isPossiblyWrapped) {
// Self-hosted code calls this with a single, possibly wrapped,
@@ -10296,11 +10507,6 @@ AttachDecision InlinableNativeIRGenerator::tryAttachArrayBufferByteLength(
auto* buffer = &args_[0].toObject().as<ArrayBufferObject>();
- // TODO: Support resizable buffers. (bug 1842999)
- if (buffer->isResizable()) {
- return AttachDecision::NoAction;
- }
-
// Initialize the input operand.
initializeInputOperand();
@@ -10662,14 +10868,6 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayConstructor() {
if (args_[0].isObject() && args_[0].toObject().is<ProxyObject>()) {
return AttachDecision::NoAction;
}
- if (args_[0].isObject() &&
- args_[0].toObject().is<ResizableArrayBufferObject>()) {
- return AttachDecision::NoAction;
- }
- if (args_[0].isObject() &&
- args_[0].toObject().is<GrowableSharedArrayBufferObject>()) {
- return AttachDecision::NoAction;
- }
#ifdef JS_CODEGEN_X86
// Unfortunately NewTypedArrayFromArrayBufferResult needs more registers than
@@ -10714,9 +10912,13 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayConstructor() {
// From ArrayBuffer.
if (obj->is<FixedLengthArrayBufferObject>()) {
writer.guardClass(objId, GuardClassKind::FixedLengthArrayBuffer);
- } else {
- MOZ_ASSERT(obj->is<FixedLengthSharedArrayBufferObject>());
+ } else if (obj->is<FixedLengthSharedArrayBufferObject>()) {
writer.guardClass(objId, GuardClassKind::FixedLengthSharedArrayBuffer);
+ } else if (obj->is<ResizableArrayBufferObject>()) {
+ writer.guardClass(objId, GuardClassKind::ResizableArrayBuffer);
+ } else {
+ MOZ_ASSERT(obj->is<GrowableSharedArrayBufferObject>());
+ writer.guardClass(objId, GuardClassKind::GrowableSharedArrayBuffer);
}
ValOperandId byteOffsetId;
if (argc_ > 1) {
@@ -11584,7 +11786,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachStub() {
// Map intrinsics.
case InlinableNative::IntrinsicGuardToMapObject:
- return tryAttachGuardToClass(native);
+ return tryAttachGuardToClass(GuardClassKind::Map);
case InlinableNative::IntrinsicGetNextMapEntryForIterator:
return tryAttachGetNextMapSetEntryForIterator(/* isMap = */ true);
@@ -11612,7 +11814,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachStub() {
// Set intrinsics.
case InlinableNative::IntrinsicGuardToSetObject:
- return tryAttachGuardToClass(native);
+ return tryAttachGuardToClass(GuardClassKind::Set);
case InlinableNative::IntrinsicGetNextSetEntryForIterator:
return tryAttachGetNextMapSetEntryForIterator(/* isMap = */ false);
@@ -11642,11 +11844,14 @@ AttachDecision InlinableNativeIRGenerator::tryAttachStub() {
case InlinableNative::IntrinsicTypedArrayElementSize:
return tryAttachTypedArrayElementSize();
case InlinableNative::IntrinsicTypedArrayLength:
- return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false);
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false,
+ /* allowOutOfBounds = */ false);
case InlinableNative::IntrinsicTypedArrayLengthZeroOnOutOfBounds:
- return tryAttachTypedArrayLengthZeroOnOutOfBounds();
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false,
+ /* allowOutOfBounds = */ true);
case InlinableNative::IntrinsicPossiblyWrappedTypedArrayLength:
- return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ true);
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ true,
+ /* allowOutOfBounds = */ false);
// Reflect natives.
case InlinableNative::ReflectGetPrototypeOf:
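
Note: the recurring pattern in the CacheIR.cpp hunks above is that each generator now asks whether the view is fixed-length or resizable (ToArrayBufferViewKind), treats a Nothing() length as "resizable view out of bounds", and emits either the existing fixed-length op or a new resizable variant, with an Int32 or Double result depending on magnitude. A self-contained toy model of that dispatch (illustrative only, not the generator code):

#include <cstdint>
#include <cstdio>
#include <optional>

enum class ArrayBufferViewKind : uint8_t { FixedLength, Resizable };

struct ToyView {
  ArrayBufferViewKind kind;
  std::optional<uint64_t> length;  // std::nullopt == resizable view out of bounds
};

// Pick the CacheIR result op the way the generators above do.
const char* PickLengthOp(const ToyView& view) {
  uint64_t length = view.length.value_or(0);
  if (view.kind == ArrayBufferViewKind::FixedLength) {
    return length <= INT32_MAX ? "LoadArrayBufferViewLengthInt32Result"
                               : "LoadArrayBufferViewLengthDoubleResult";
  }
  return length <= INT32_MAX ? "ResizableTypedArrayLengthInt32Result"
                             : "ResizableTypedArrayLengthDoubleResult";
}

int main() {
  ToyView fixedSmall{ArrayBufferViewKind::FixedLength, 16};
  ToyView resizableOOB{ArrayBufferViewKind::Resizable, std::nullopt};
  std::printf("%s\n", PickLengthOp(fixedSmall));    // ...Int32Result
  std::printf("%s\n", PickLengthOp(resizableOOB));  // Resizable...Int32Result
  return 0;
}
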
diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
index b483257d12..9bedbb7ddc 100644
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -515,8 +515,11 @@ enum class GuardClassKind : uint8_t {
Array,
PlainObject,
FixedLengthArrayBuffer,
+ ResizableArrayBuffer,
FixedLengthSharedArrayBuffer,
+ GrowableSharedArrayBuffer,
FixedLengthDataView,
+ ResizableDataView,
MappedArguments,
UnmappedArguments,
WindowProxy,
@@ -526,6 +529,13 @@ enum class GuardClassKind : uint8_t {
Map,
};
+const JSClass* ClassFor(GuardClassKind kind);
+
+enum class ArrayBufferViewKind : uint8_t {
+ FixedLength,
+ Resizable,
+};
+
} // namespace jit
} // namespace js
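
Note: the CacheIR.h hunk adds the resizable/growable GuardClassKinds, the ClassFor() mapping, and the ArrayBufferViewKind enum used throughout the diff. The new "guard to either class" shape (tryAttachGuardToEitherClass above, emitGuardEitherClass below) simply accepts two candidate classes instead of one. A toy model of that semantics, with made-up types for illustration:

#include <cstdint>
#include <cstdio>

enum class GuardClassKind : uint8_t {
  FixedLengthArrayBuffer,
  ResizableArrayBuffer,
  FixedLengthSharedArrayBuffer,
  GrowableSharedArrayBuffer,
};

struct ToyObject {
  GuardClassKind clasp;  // Stand-in for the object's JSClass.
};

// The guard passes when the object's class matches either expected kind,
// e.g. FixedLengthArrayBuffer or ResizableArrayBuffer for GuardToArrayBuffer.
bool GuardEitherClass(const ToyObject& obj, GuardClassKind kind1,
                      GuardClassKind kind2) {
  return obj.clasp == kind1 || obj.clasp == kind2;
}

int main() {
  ToyObject resizable{GuardClassKind::ResizableArrayBuffer};
  bool ok = GuardEitherClass(resizable, GuardClassKind::FixedLengthArrayBuffer,
                             GuardClassKind::ResizableArrayBuffer);
  std::printf("guard %s\n", ok ? "passes" : "fails");  // passes
  return 0;
}
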
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
index 73f3831371..1467cebe08 100644
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -18,7 +18,6 @@
#include "jsmath.h"
#include "builtin/DataViewObject.h"
-#include "builtin/MapObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "gc/SweepingAPI.h" // js::gc::AutoLockStoreBuffer
@@ -1365,6 +1364,8 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
const CacheIRStubInfo* stubInfo) {
using Type = StubField::Type;
+ bool isDead = false;
+
uint32_t field = 0;
size_t offset = 0;
while (true) {
@@ -1375,7 +1376,7 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
@@ -1384,7 +1385,7 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
@@ -1393,7 +1394,7 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
@@ -1403,12 +1404,13 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
auto r = TraceWeakEdge(trc, &getterSetterField,
"cacheir-weak-getter-setter");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
case Type::Limit:
- return true; // Done.
+ // Done.
+ return !isDead;
case Type::RawInt32:
case Type::RawPointer:
case Type::Shape:
@@ -2148,6 +2150,30 @@ bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
return true;
}
+static const JSClass* ClassFor(JSContext* cx, GuardClassKind kind) {
+ switch (kind) {
+ case GuardClassKind::Array:
+ case GuardClassKind::PlainObject:
+ case GuardClassKind::FixedLengthArrayBuffer:
+ case GuardClassKind::ResizableArrayBuffer:
+ case GuardClassKind::FixedLengthSharedArrayBuffer:
+ case GuardClassKind::GrowableSharedArrayBuffer:
+ case GuardClassKind::FixedLengthDataView:
+ case GuardClassKind::ResizableDataView:
+ case GuardClassKind::MappedArguments:
+ case GuardClassKind::UnmappedArguments:
+ case GuardClassKind::Set:
+ case GuardClassKind::Map:
+ case GuardClassKind::BoundFunction:
+ return ClassFor(kind);
+ case GuardClassKind::WindowProxy:
+ return cx->runtime()->maybeWindowProxyClass();
+ case GuardClassKind::JSFunction:
+ MOZ_CRASH("must be handled by caller");
+ }
+ MOZ_CRASH("unexpected kind");
+}
+
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
@@ -2169,44 +2195,7 @@ bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
return true;
}
- const JSClass* clasp = nullptr;
- switch (kind) {
- case GuardClassKind::Array:
- clasp = &ArrayObject::class_;
- break;
- case GuardClassKind::PlainObject:
- clasp = &PlainObject::class_;
- break;
- case GuardClassKind::FixedLengthArrayBuffer:
- clasp = &FixedLengthArrayBufferObject::class_;
- break;
- case GuardClassKind::FixedLengthSharedArrayBuffer:
- clasp = &FixedLengthSharedArrayBufferObject::class_;
- break;
- case GuardClassKind::FixedLengthDataView:
- clasp = &FixedLengthDataViewObject::class_;
- break;
- case GuardClassKind::MappedArguments:
- clasp = &MappedArgumentsObject::class_;
- break;
- case GuardClassKind::UnmappedArguments:
- clasp = &UnmappedArgumentsObject::class_;
- break;
- case GuardClassKind::WindowProxy:
- clasp = cx_->runtime()->maybeWindowProxyClass();
- break;
- case GuardClassKind::Set:
- clasp = &SetObject::class_;
- break;
- case GuardClassKind::Map:
- clasp = &MapObject::class_;
- break;
- case GuardClassKind::BoundFunction:
- clasp = &BoundFunctionObject::class_;
- break;
- case GuardClassKind::JSFunction:
- MOZ_CRASH("JSFunction handled before switch");
- }
+ const JSClass* clasp = ClassFor(cx_, kind);
MOZ_ASSERT(clasp);
if (objectGuardNeedsSpectreMitigations(objId)) {
@@ -2220,6 +2209,39 @@ bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
return true;
}
+bool CacheIRCompiler::emitGuardEitherClass(ObjOperandId objId,
+ GuardClassKind kind1,
+ GuardClassKind kind2) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // We don't yet need this case, so it's unsupported for now.
+ MOZ_ASSERT(kind1 != GuardClassKind::JSFunction &&
+ kind2 != GuardClassKind::JSFunction);
+
+ const JSClass* clasp1 = ClassFor(cx_, kind1);
+ MOZ_ASSERT(clasp1);
+
+ const JSClass* clasp2 = ClassFor(cx_, kind2);
+ MOZ_ASSERT(clasp2);
+
+ if (objectGuardNeedsSpectreMitigations(objId)) {
+ masm.branchTestObjClass(Assembler::NotEqual, obj, {clasp1, clasp2}, scratch,
+ obj, failure->label());
+ } else {
+ masm.branchTestObjClassNoSpectreMitigations(
+ Assembler::NotEqual, obj, {clasp1, clasp2}, scratch, failure->label());
+ }
+
+ return true;
+}
+
bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
@@ -2569,6 +2591,22 @@ bool CacheIRCompiler::emitGuardIsFixedLengthTypedArray(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitGuardIsResizableTypedArray(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadObjClassUnsafe(obj, scratch);
+ masm.branchIfClassIsNotResizableTypedArray(scratch, failure->label());
+ return true;
+}
+
bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
@@ -2796,7 +2834,7 @@ bool CacheIRCompiler::emitStringToAtom(StringOperandId stringId) {
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &done);
- masm.lookupStringInAtomCacheLastLookups(str, scratch, &vmCall);
+ masm.lookupStringInAtomCacheLastLookups(str, scratch, str, &vmCall);
masm.jump(&done);
masm.bind(&vmCall);
@@ -4760,17 +4798,30 @@ bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
}
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
- ObjOperandId objId, IntPtrOperandId indexId) {
+ ObjOperandId objId, IntPtrOperandId indexId, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Maybe<AutoScratchRegister> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm);
+ }
Label outOfBounds, done;
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ } else {
+ // Bounds check doesn't require synchronization. See IsValidIntegerIndex
+ // abstract operation which reads the underlying buffer byte length using
+ // "unordered" memory order.
+ auto sync = Synchronization::None();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, *scratch2);
+ }
masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
EmitStoreBoolean(masm, true, output);
masm.jump(&done);
@@ -5039,6 +5090,46 @@ bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
return true;
}
+bool CacheIRCompiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
+ scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
+ scratch2);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -5081,6 +5172,100 @@ bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitResizableTypedArrayByteLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+ masm.typedArrayElementSize(obj, scratch2);
+
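+  // Compute |byteLength| = |length| * |elementSize|, bailing out to the
+  // failure path on int32 overflow.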
+ masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
+ failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableTypedArrayByteLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.typedArrayElementSize(obj, scratch2);
+ masm.mulPtr(scratch2, scratch1);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableTypedArrayLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableTypedArrayLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -5093,6 +5278,92 @@ bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitResizableDataViewByteLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableDataViewByteLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
+ masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -5108,6 +5379,42 @@ bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBounds(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoScratchRegister scratch(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfResizableArrayBufferViewOutOfBounds(obj, scratch,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBoundsOrDetached(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoScratchRegister scratch(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
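+  // Accept views that are in-bounds and out-of-bounds views whose buffer has
+  // been detached; an out-of-bounds view with an attached buffer fails.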
+ Label done;
+ masm.branchIfResizableArrayBufferViewInBounds(obj, scratch, &done);
+ masm.branchIfHasAttachedArrayBuffer(obj, scratch, failure->label());
+ masm.bind(&done);
+ return true;
+}
+
bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -6220,8 +6527,8 @@ bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
Scalar::Type elementType,
IntPtrOperandId indexId,
- uint32_t rhsId,
- bool handleOOB) {
+ uint32_t rhsId, bool handleOOB,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
@@ -6261,7 +6568,8 @@ bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
AutoScratchRegister scratch1(allocator, masm);
Maybe<AutoScratchRegister> scratch2;
Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
- if (Scalar::isBigIntType(elementType)) {
+ if (Scalar::isBigIntType(elementType) ||
+ viewKind == ArrayBufferViewKind::Resizable) {
scratch2.emplace(allocator, masm);
} else {
spectreScratch.emplace(allocator, masm);
@@ -6276,10 +6584,9 @@ bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
// Bounds check.
Label done;
- Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
- masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
- handleOOB ? &done : failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2,
+ spectreScratch,
+ handleOOB ? &done : failure->label());
// Load the elements vector.
masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
@@ -6348,9 +6655,61 @@ static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
masm.bind(&done);
}
+void CacheIRCompiler::emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind,
+ Register obj, Register index,
+ Register scratch,
+ Register maybeScratch,
+ Register spectreScratch,
+ Label* fail) {
+ // |index| must not alias any scratch register.
+ MOZ_ASSERT(index != scratch);
+ MOZ_ASSERT(index != maybeScratch);
+ MOZ_ASSERT(index != spectreScratch);
+
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
+ } else {
+ if (maybeScratch == InvalidReg) {
+ // Spill |index| to use it as an additional scratch register.
+ masm.push(index);
+
+ maybeScratch = index;
+ } else {
+ // Use |maybeScratch| when no explicit |spectreScratch| is present.
+ if (spectreScratch == InvalidReg) {
+ spectreScratch = maybeScratch;
+ }
+ }
+
+ // Bounds check doesn't require synchronization. See IsValidIntegerIndex
+ // abstract operation which reads the underlying buffer byte length using
+ // "unordered" memory order.
+ auto sync = Synchronization::None();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, maybeScratch);
+
+ if (maybeScratch == index) {
+ // Restore |index|.
+ masm.pop(index);
+ }
+
+ masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
+ }
+}
+
+void CacheIRCompiler::emitTypedArrayBoundsCheck(
+ ArrayBufferViewKind viewKind, Register obj, Register index,
+ Register scratch, mozilla::Maybe<Register> maybeScratch,
+ mozilla::Maybe<Register> spectreScratch, Label* fail) {
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch,
+ maybeScratch.valueOr(InvalidReg),
+ spectreScratch.valueOr(InvalidReg), fail);
+}
+
bool CacheIRCompiler::emitLoadTypedArrayElementResult(
ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
- bool handleOOB, bool forceDoubleForUint32) {
+ bool handleOOB, bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
@@ -6372,9 +6731,8 @@ bool CacheIRCompiler::emitLoadTypedArrayElementResult(
// Bounds check.
Label outOfBounds;
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
- masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
- handleOOB ? &outOfBounds : failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2, scratch2,
+ handleOOB ? &outOfBounds : failure->label());
// Allocate BigInt if needed. The code after this should be infallible.
Maybe<Register> bigInt;
@@ -6437,11 +6795,40 @@ bool CacheIRCompiler::emitLoadTypedArrayElementResult(
return true;
}
-static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
- Register obj, Register offset,
- Register scratch, Label* fail) {
+void CacheIRCompiler::emitDataViewBoundsCheck(ArrayBufferViewKind viewKind,
+ size_t byteSize, Register obj,
+ Register offset, Register scratch,
+ Register maybeScratch,
+ Label* fail) {
+ // |offset| must not alias any scratch register.
+ MOZ_ASSERT(offset != scratch);
+ MOZ_ASSERT(offset != maybeScratch);
+
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ } else {
+ if (maybeScratch == InvalidReg) {
+ // Spill |offset| to use it as an additional scratch register.
+ masm.push(offset);
+
+ maybeScratch = offset;
+ }
+
+ // Bounds check doesn't require synchronization. See GetViewValue and
+ // SetViewValue abstract operations which read the underlying buffer byte
+ // length using "unordered" memory order.
+ auto sync = Synchronization::None();
+
+ masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch,
+ maybeScratch);
+
+ if (maybeScratch == offset) {
+ // Restore |offset|.
+ masm.pop(offset);
+ }
+ }
+
// Ensure both offset < length and offset + (byteSize - 1) < length.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
if (byteSize == 1) {
masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
} else {
@@ -6456,7 +6843,7 @@ static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
bool CacheIRCompiler::emitLoadDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId,
BooleanOperandId littleEndianId, Scalar::Type elementType,
- bool forceDoubleForUint32) {
+ bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
@@ -6469,6 +6856,18 @@ bool CacheIRCompiler::emitLoadDataViewValueResult(
Register64 outputReg64 = output.valueReg().toRegister64();
Register outputScratch = outputReg64.scratchReg();
+ Register boundsCheckScratch;
+#ifndef JS_CODEGEN_X86
+ Maybe<AutoScratchRegister> maybeBoundsCheckScratch;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ maybeBoundsCheckScratch.emplace(allocator, masm);
+ boundsCheckScratch = *maybeBoundsCheckScratch;
+ }
+#else
+ // Not enough registers on x86, so use the other part of outputReg64.
+ boundsCheckScratch = outputReg64.secondScratchReg();
+#endif
+
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
@@ -6476,8 +6875,8 @@ bool CacheIRCompiler::emitLoadDataViewValueResult(
const size_t byteSize = Scalar::byteSize(elementType);
- EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
- failure->label());
+ emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, outputScratch,
+ boundsCheckScratch, failure->label());
masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);
@@ -6612,7 +7011,8 @@ bool CacheIRCompiler::emitLoadDataViewValueResult(
bool CacheIRCompiler::emitStoreDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
- BooleanOperandId littleEndianId, Scalar::Type elementType) {
+ BooleanOperandId littleEndianId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
@@ -6686,6 +7086,24 @@ bool CacheIRCompiler::emitStoreDataViewValueResult(
}
#endif
+ Register boundsCheckScratch;
+#ifndef JS_CODEGEN_X86
+ Maybe<AutoScratchRegister> maybeBoundsCheckScratch;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
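+    // Reuse the scratch register already allocated for the value, if any;
+    // otherwise allocate a dedicated scratch for the bounds check.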
+ if (scratch2.constructed<AutoScratchRegister>()) {
+ boundsCheckScratch = scratch2.ref<AutoScratchRegister>().get();
+ } else if (scratch2.constructed<AutoScratchRegister64>()) {
+ boundsCheckScratch =
+ scratch2.ref<AutoScratchRegister64>().get().scratchReg();
+ } else {
+ maybeBoundsCheckScratch.emplace(allocator, masm);
+ boundsCheckScratch = *maybeBoundsCheckScratch;
+ }
+ }
+#else
+ // Not enough registers on x86.
+#endif
+
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
@@ -6693,8 +7111,8 @@ bool CacheIRCompiler::emitStoreDataViewValueResult(
const size_t byteSize = Scalar::byteSize(elementType);
- EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
- failure->label());
+ emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, scratch1,
+ boundsCheckScratch, failure->label());
masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
BaseIndex dest(scratch1, offset, TimesOne);
@@ -8903,7 +9321,8 @@ bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
- uint32_t replacementId, Scalar::Type elementType) {
+ uint32_t replacementId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Maybe<AutoOutputRegister> output;
@@ -8936,8 +9355,17 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
: callvm->outputValueReg().scratchReg();
MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");
+ Maybe<AutoScratchRegister> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+#ifdef JS_CODEGEN_X86
+ // Not enough spare registers on x86.
+#else
+ scratch2.emplace(allocator, masm);
+#endif
+ }
+
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -8950,8 +9378,8 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// Atomic operations are highly platform-dependent, for example x86/x64 has
// specific requirements on which registers are used; MIPS needs multiple
@@ -8966,8 +9394,8 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
masm.Push(index);
masm.Push(obj);
- using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t,
- const BigInt*, const BigInt*);
+ using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t, const BigInt*,
+ const BigInt*);
callvm->call<Fn, jit::AtomicsCompareExchange64>();
return true;
}
@@ -9004,15 +9432,20 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
- Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
+ Scalar::Type elementType, ArrayBufferViewKind viewKind,
+ AtomicsReadWriteModifyFn fn) {
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
Register value = allocator.useRegister(masm, Int32OperandId(valueId));
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm, output);
+ }
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -9020,8 +9453,8 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
}
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// See comment in emitAtomicsCompareExchange for why we use an ABI call.
{
@@ -9054,15 +9487,20 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
- ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ ArrayBufferViewKind viewKind) {
AutoCallVM callvm(masm, this, allocator);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
+ Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm, callvm.output());
+ }
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -9075,8 +9513,8 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// See comment in emitAtomicsCompareExchange for why we use a VM call.
@@ -9093,95 +9531,88 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
IntPtrOperandId indexId,
uint32_t valueId,
- Scalar::Type elementType) {
+ Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
- objId, indexId, valueId);
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ viewKind,
AtomicsExchange(elementType));
}
-bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsAddResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsAdd(elementType));
+ viewKind, AtomicsAdd(elementType));
}
-bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsSubResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsSub(elementType));
+ viewKind, AtomicsSub(elementType));
}
-bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsAndResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsAnd(elementType));
+ viewKind, AtomicsAnd(elementType));
}
-bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsOrResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsOr(elementType));
+ viewKind, AtomicsOr(elementType));
}
-bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsXorResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsXor(elementType));
+ viewKind, AtomicsXor(elementType));
}
bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
IntPtrOperandId indexId,
- Scalar::Type elementType) {
+ Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Maybe<AutoOutputRegister> output;
@@ -9195,7 +9626,13 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm,
output ? *output : callvm->output());
- AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
+ Maybe<AutoSpectreBoundsScratchRegister> spectreTemp;
+ Maybe<AutoScratchRegister> scratch2;
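+  // Fixed-length views only need a Spectre bounds-check scratch; resizable
+  // views need a second scratch to load the length.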
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ spectreTemp.emplace(allocator, masm);
+ } else {
+ scratch2.emplace(allocator, masm);
+ }
AutoAvailableFloatRegister floatReg(*this, FloatReg0);
FailurePath* failure;
@@ -9209,8 +9646,8 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// Atomic operations are highly platform-dependent, for example x86/arm32 has
// specific requirements on which registers are used. Therefore we're using a
@@ -9221,7 +9658,7 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
masm.Push(index);
masm.Push(obj);
- using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t);
+ using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
callvm->call<Fn, jit::AtomicsLoad64>();
return true;
}
@@ -9250,7 +9687,8 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
IntPtrOperandId indexId,
uint32_t valueId,
- Scalar::Type elementType) {
+ Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
@@ -9264,9 +9702,13 @@ bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
}
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm, output);
+ }
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -9274,8 +9716,8 @@ bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
}
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
if (!Scalar::isBigIntType(elementType)) {
// Load the elements vector.
@@ -9302,7 +9744,7 @@ bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
volatileRegs.takeUnchecked(scratch);
masm.PushRegsInMask(volatileRegs);
- using Fn = void (*)(FixedLengthTypedArrayObject*, size_t, const BigInt*);
+ using Fn = void (*)(TypedArrayObject*, size_t, const BigInt*);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(obj);
masm.passABIArg(index);
diff --git a/js/src/jit/CacheIRCompiler.h b/js/src/jit/CacheIRCompiler.h
index 3b8941e242..69b1dd34ac 100644
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -25,7 +25,6 @@ class BigInt;
namespace js {
-class FixedLengthTypedArrayObject;
class TypedArrayObject;
enum class UnaryMathFunction : uint8_t;
@@ -846,21 +845,37 @@ class MOZ_RAII CacheIRCompiler {
bool emitDoubleIncDecResult(bool isInc, NumberOperandId inputId);
- using AtomicsReadWriteModifyFn = int32_t (*)(FixedLengthTypedArrayObject*,
- size_t, int32_t);
+ void emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind, Register obj,
+ Register index, Register scratch,
+ Register maybeScratch, Register spectreScratch,
+ Label* fail);
+
+ void emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind, Register obj,
+ Register index, Register scratch,
+ mozilla::Maybe<Register> maybeScratch,
+ mozilla::Maybe<Register> spectreScratch,
+ Label* fail);
+
+ void emitDataViewBoundsCheck(ArrayBufferViewKind viewKind, size_t byteSize,
+ Register obj, Register offset, Register scratch,
+ Register maybeScratch, Label* fail);
+
+ using AtomicsReadWriteModifyFn = int32_t (*)(TypedArrayObject*, size_t,
+ int32_t);
[[nodiscard]] bool emitAtomicsReadModifyWriteResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
- Scalar::Type elementType, AtomicsReadWriteModifyFn fn);
+ Scalar::Type elementType, ArrayBufferViewKind viewKind,
+ AtomicsReadWriteModifyFn fn);
- using AtomicsReadWriteModify64Fn =
- JS::BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t,
- const JS::BigInt*);
+ using AtomicsReadWriteModify64Fn = JS::BigInt* (*)(JSContext*,
+ TypedArrayObject*, size_t,
+ const JS::BigInt*);
template <AtomicsReadWriteModify64Fn fn>
- [[nodiscard]] bool emitAtomicsReadModifyWriteResult64(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId);
+ [[nodiscard]] bool emitAtomicsReadModifyWriteResult64(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ ArrayBufferViewKind viewKind);
void emitActivateIterator(Register objBeingIterated, Register iterObject,
Register nativeIter, Register scratch,
diff --git a/js/src/jit/CacheIRGenerator.h b/js/src/jit/CacheIRGenerator.h
index 9880b82b71..2e15b2d8a6 100644
--- a/js/src/jit/CacheIRGenerator.h
+++ b/js/src/jit/CacheIRGenerator.h
@@ -636,6 +636,9 @@ class MOZ_RAII InlinableNativeIRGenerator {
AttachDecision tryAttachIsConstructor();
AttachDecision tryAttachIsCrossRealmArrayConstructor();
AttachDecision tryAttachGuardToClass(InlinableNative native);
+ AttachDecision tryAttachGuardToClass(GuardClassKind kind);
+ AttachDecision tryAttachGuardToEitherClass(GuardClassKind kind1,
+ GuardClassKind kind2);
AttachDecision tryAttachGuardToArrayBuffer();
AttachDecision tryAttachGuardToSharedArrayBuffer();
AttachDecision tryAttachHasClass(const JSClass* clasp,
@@ -693,8 +696,8 @@ class MOZ_RAII InlinableNativeIRGenerator {
AttachDecision tryAttachIsTypedArrayConstructor();
AttachDecision tryAttachTypedArrayByteOffset();
AttachDecision tryAttachTypedArrayElementSize();
- AttachDecision tryAttachTypedArrayLength(bool isPossiblyWrapped);
- AttachDecision tryAttachTypedArrayLengthZeroOnOutOfBounds();
+ AttachDecision tryAttachTypedArrayLength(bool isPossiblyWrapped,
+ bool allowOutOfBounds);
AttachDecision tryAttachArrayBufferByteLength(bool isPossiblyWrapped);
AttachDecision tryAttachIsConstructing();
AttachDecision tryAttachGetNextMapSetEntryForIterator(bool isMap);
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
index ccaf64d924..974404d5c0 100644
--- a/js/src/jit/CacheIROps.yaml
+++ b/js/src/jit/CacheIROps.yaml
@@ -272,6 +272,16 @@
obj: ObjId
kind: GuardClassKindImm
+# Guard on either of two GuardClassKinds.
+- name: GuardEitherClass
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ kind1: GuardClassKindImm
+ kind2: GuardClassKindImm
+
# Guard on a realm fuse.
- name: GuardFuse
shared: true
@@ -472,6 +482,13 @@
args:
obj: ObjId
+- name: GuardIsResizableTypedArray
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
- name: GuardHasProxyHandler
shared: false
transpile: true
@@ -1189,6 +1206,20 @@
args:
obj: ObjId
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: TypedArrayByteLengthInt32Result
shared: true
transpile: true
@@ -1203,6 +1234,34 @@
args:
obj: ObjId
+- name: ResizableTypedArrayByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: TypedArrayElementSizeResult
shared: true
transpile: true
@@ -1210,6 +1269,34 @@
args:
obj: ObjId
+- name: ResizableDataViewByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableDataViewByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: GrowableSharedArrayBufferByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: GrowableSharedArrayBufferByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: GuardHasAttachedArrayBuffer
shared: true
transpile: true
@@ -1217,6 +1304,20 @@
args:
obj: ObjId
+- name: GuardResizableArrayBufferViewInBounds
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: GuardResizableArrayBufferViewInBoundsOrDetached
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: NewArrayIteratorResult
shared: true
transpile: true
@@ -1615,6 +1716,7 @@
index: IntPtrId
rhs: RawId
handleOOB: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsCompareExchangeResult
shared: true
@@ -1626,6 +1728,7 @@
expected: RawId
replacement: RawId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsExchangeResult
shared: true
@@ -1636,6 +1739,7 @@
index: IntPtrId
value: RawId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsAddResult
shared: true
@@ -1647,6 +1751,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsSubResult
shared: true
@@ -1658,6 +1763,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsAndResult
shared: true
@@ -1669,6 +1775,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsOrResult
shared: true
@@ -1680,6 +1787,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsXorResult
shared: true
@@ -1691,6 +1799,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsLoadResult
shared: true
@@ -1700,6 +1809,7 @@
obj: ObjId
index: IntPtrId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsStoreResult
shared: true
@@ -1710,6 +1820,7 @@
index: IntPtrId
value: RawId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsIsLockFreeResult
shared: true
@@ -2051,6 +2162,7 @@
args:
obj: ObjId
index: IntPtrId
+ viewKind: ArrayBufferViewKindImm
- name: LoadDenseElementHoleExistsResult
shared: true
@@ -2070,6 +2182,7 @@
elementType: ScalarTypeImm
handleOOB: BoolImm
forceDoubleForUint32: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: LoadDataViewValueResult
shared: true
@@ -2081,6 +2194,7 @@
littleEndian: BooleanId
elementType: ScalarTypeImm
forceDoubleForUint32: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: StoreDataViewValueResult
shared: true
@@ -2092,6 +2206,7 @@
value: RawId
littleEndian: BooleanId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: LoadInt32ArrayLengthResult
shared: true
diff --git a/js/src/jit/CacheIRReader.h b/js/src/jit/CacheIRReader.h
index affefdac01..54b298c999 100644
--- a/js/src/jit/CacheIRReader.h
+++ b/js/src/jit/CacheIRReader.h
@@ -96,6 +96,9 @@ class MOZ_RAII CacheIRReader {
uint32_t stubOffset() { return buffer_.readByte() * sizeof(uintptr_t); }
GuardClassKind guardClassKind() { return GuardClassKind(buffer_.readByte()); }
+ ArrayBufferViewKind arrayBufferViewKind() {
+ return ArrayBufferViewKind(buffer_.readByte());
+ }
ValueType valueType() { return ValueType(buffer_.readByte()); }
wasm::ValType::Kind wasmValType() {
return wasm::ValType::Kind(buffer_.readByte());
diff --git a/js/src/jit/CacheIRSpewer.cpp b/js/src/jit/CacheIRSpewer.cpp
index 921da75d61..613e0f7d85 100644
--- a/js/src/jit/CacheIRSpewer.cpp
+++ b/js/src/jit/CacheIRSpewer.cpp
@@ -106,6 +106,9 @@ class MOZ_RAII CacheIROpsJitSpewer {
void spewGuardClassKindImm(const char* name, GuardClassKind kind) {
out_.printf("%s GuardClassKind(%u)", name, unsigned(kind));
}
+ void spewArrayBufferViewKindImm(const char* name, ArrayBufferViewKind kind) {
+ out_.printf("%s ArrayBufferViewKind(%u)", name, unsigned(kind));
+ }
void spewWasmValTypeImm(const char* name, wasm::ValType::Kind kind) {
out_.printf("%s WasmValTypeKind(%u)", name, unsigned(kind));
}
@@ -251,6 +254,9 @@ class MOZ_RAII CacheIROpsJSONSpewer {
void spewGuardClassKindImm(const char* name, GuardClassKind kind) {
spewArgImpl(name, "Imm", unsigned(kind));
}
+ void spewArrayBufferViewKindImm(const char* name, ArrayBufferViewKind kind) {
+ spewArgImpl(name, "Imm", unsigned(kind));
+ }
void spewRealmFuseIndexImm(const char* name, RealmFuses::FuseIndex kind) {
spewArgImpl(name, "Imm", unsigned(kind));
}
diff --git a/js/src/jit/CacheIRWriter.h b/js/src/jit/CacheIRWriter.h
index 454a1b2511..6a32885d7c 100644
--- a/js/src/jit/CacheIRWriter.h
+++ b/js/src/jit/CacheIRWriter.h
@@ -262,6 +262,11 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter {
"GuardClassKind must fit in a byte");
buffer_.writeByte(uint8_t(kind));
}
+ void writeArrayBufferViewKindImm(ArrayBufferViewKind kind) {
+ static_assert(sizeof(ArrayBufferViewKind) == sizeof(uint8_t),
+ "ArrayBufferViewKind must fit in a byte");
+ buffer_.writeByte(uint8_t(kind));
+ }
void writeValueTypeImm(ValueType type) {
static_assert(sizeof(ValueType) == sizeof(uint8_t),
"ValueType must fit in uint8_t");
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 2c41acc736..10a69f0cb3 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -2167,8 +2167,8 @@ class CreateDependentString {
NotInlineString,
Count
};
- mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
- joins_;
+ mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
+ fallbacks_, joins_;
public:
CreateDependentString(CharEncoding encoding, Register string, Register temp1,
@@ -4632,6 +4632,17 @@ void CodeGenerator::visitGuardIsFixedLengthTypedArray(
bailoutFrom(&bail, guard->snapshot());
}
+void CodeGenerator::visitGuardIsResizableTypedArray(
+ LGuardIsResizableTypedArray* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.loadObjClassUnsafe(obj, temp);
+ masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
Register obj = ToRegister(guard->input());
@@ -9660,6 +9671,68 @@ void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
masm.typedArrayElementSize(obj, out);
}
+void CodeGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
+ LResizableTypedArrayByteOffsetMaybeOutOfBounds* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, out, temp);
+}
+
+void CodeGenerator::visitResizableTypedArrayLength(
+ LResizableTypedArrayLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadResizableTypedArrayLengthIntPtr(lir->synchronization(), obj, out,
+ temp);
+}
+
+void CodeGenerator::visitResizableDataViewByteLength(
+ LResizableDataViewByteLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadResizableDataViewByteLengthIntPtr(lir->synchronization(), obj, out,
+ temp);
+}
+
+void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
+ LGrowableSharedArrayBufferByteLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
+}
+
+void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
+ LGuardResizableArrayBufferViewInBounds* lir) {
+ Register obj = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp0());
+
+ Label bail;
+ masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
+ LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
+ Register obj = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp0());
+
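+  // Accept in-bounds views and out-of-bounds views whose buffer has been
+  // detached; an out-of-bounds view with an attached buffer bails out.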
+ Label done, bail;
+ masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
+ masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
+ masm.bind(&done);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
void CodeGenerator::visitGuardHasAttachedArrayBuffer(
LGuardHasAttachedArrayBuffer* lir) {
Register obj = ToRegister(lir->object());
@@ -15039,15 +15112,19 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
// REG DUMP AREA, if any.
size_t regDumpWords = 0;
const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
- GeneralRegisterForwardIterator wasmAnyRefRegsIter(wasmAnyRefRegs);
+ const LiveGeneralRegisterSet slotsOrElementsRegs =
+ safepoint.slotsOrElementsRegs();
+ const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
+ wasmAnyRefRegs.set(), slotsOrElementsRegs.set()));
+ GeneralRegisterForwardIterator refRegsIter(refRegs);
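+  // Registers in |wasmAnyRefRegs| get an AnyRef stackmap entry below, while
+  // registers in |slotsOrElementsRegs| get an ArrayDataPointer entry.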
switch (safepoint.wasmSafepointKind()) {
case WasmSafepointKind::LirCall:
case WasmSafepointKind::CodegenCall: {
size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
regDumpWords += spilledNumWords;
- for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
- Register reg = *wasmAnyRefRegsIter;
+ for (; refRegsIter.more(); ++refRegsIter) {
+ Register reg = *refRegsIter;
size_t offsetFromSpillBase =
safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
sizeof(void*);
@@ -15055,9 +15132,13 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
offsetFromSpillBase <= spilledNumWords);
size_t index = spilledNumWords - offsetFromSpillBase;
- stackMap->set(index, wasm::StackMap::AnyRef);
+ if (wasmAnyRefRegs.has(reg)) {
+ stackMap->set(index, wasm::StackMap::AnyRef);
+ } else {
+ MOZ_ASSERT(slotsOrElementsRegs.has(reg));
+ stackMap->set(index, wasm::StackMap::ArrayDataPointer);
+ }
}
-
// Float and vector registers do not have to be handled; they cannot
// contain wasm anyrefs, and they are spilled after general-purpose
// registers. Gprs are therefore closest to the spill base and thus their
@@ -15066,8 +15147,8 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
case WasmSafepointKind::Trap: {
regDumpWords += trapExitLayoutNumWords;
- for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
- Register reg = *wasmAnyRefRegsIter;
+ for (; refRegsIter.more(); ++refRegsIter) {
+ Register reg = *refRegsIter;
size_t offsetFromTop = trapExitLayout.getOffset(reg);
// If this doesn't hold, the associated register wasn't saved by
@@ -15080,7 +15161,12 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
// offset up from the bottom of the (integer register) save area.
size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
- stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
+ if (wasmAnyRefRegs.has(reg)) {
+ stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
+ } else {
+ MOZ_ASSERT(slotsOrElementsRegs.has(reg));
+ stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
+ }
}
} break;
default:
@@ -17263,25 +17349,20 @@ void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
void CodeGenerator::visitLoadTypedArrayElementHole(
LLoadTypedArrayElementHole* lir) {
- Register object = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+ Register length = ToRegister(lir->length());
const ValueOperand out = ToOutValue(lir);
- // Load the length.
Register scratch = out.scratchReg();
- Register scratch2 = ToRegister(lir->temp0());
- Register index = ToRegister(lir->index());
- masm.loadArrayBufferViewLengthIntPtr(object, scratch);
// Load undefined if index >= length.
Label outOfBounds, done;
- masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);
-
- // Load the elements vector.
- masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
+ masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);
Scalar::Type arrayType = lir->mir()->arrayType();
Label fail;
- BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
+ BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
MacroAssembler::Uint32Mode uint32Mode =
lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
: MacroAssembler::Uint32Mode::FailOnDouble;
@@ -17301,37 +17382,38 @@ void CodeGenerator::visitLoadTypedArrayElementHole(
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
LLoadTypedArrayElementHoleBigInt* lir) {
- Register object = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+ Register length = ToRegister(lir->length());
const ValueOperand out = ToOutValue(lir);
- // On x86 there are not enough registers. In that case reuse the output's
- // type register as temporary.
+ Register temp = ToRegister(lir->temp());
+
+ // On x86 there are not enough registers. In that case reuse the output
+ // registers as temporaries.
#ifdef JS_CODEGEN_X86
- MOZ_ASSERT(lir->temp()->isBogusTemp());
- Register temp = out.typeReg();
+ MOZ_ASSERT(lir->temp64().isBogusTemp());
+ Register64 temp64 = out.toRegister64();
#else
- Register temp = ToRegister(lir->temp());
-#endif
Register64 temp64 = ToRegister64(lir->temp64());
-
- // Load the length.
- Register scratch = out.scratchReg();
- Register index = ToRegister(lir->index());
- masm.loadArrayBufferViewLengthIntPtr(object, scratch);
+#endif
// Load undefined if index >= length.
Label outOfBounds, done;
- masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);
-
- // Load the elements vector.
- masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
+ masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);
Scalar::Type arrayType = lir->mir()->arrayType();
- BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
+ BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
masm.load64(source, temp64);
+#ifdef JS_CODEGEN_X86
+ Register bigInt = temp;
+ Register maybeTemp = InvalidReg;
+#else
Register bigInt = out.scratchReg();
- emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);
+ Register maybeTemp = temp;
+#endif
+ emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);
masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
masm.jump(&done);
@@ -17679,6 +17761,10 @@ void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
masm.bind(&skip);
}
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
Register value = ToRegister(lir->value());
Register output = ToRegister(lir->output());
@@ -18453,6 +18539,24 @@ void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
bailoutFrom(&notEqual, ins->snapshot());
}
+void CodeGenerator::visitGuardToEitherClass(LGuardToEitherClass* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp0());
+
+ // branchTestObjClass may zero the object register on speculative paths
+ // (we should have a defineReuseInput allocation in this case).
+ Register spectreRegToZero = lhs;
+
+ Label notEqual;
+
+ masm.branchTestObjClass(Assembler::NotEqual, lhs,
+ {ins->mir()->getClass1(), ins->mir()->getClass2()},
+ temp, spectreRegToZero, &notEqual);
+
+  // We can't return null here, so bail instead.
+ bailoutFrom(&notEqual, ins->snapshot());
+}
+
void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
Register lhs = ToRegister(ins->lhs());
Register temp = ToRegister(ins->temp0());
@@ -20133,7 +20237,8 @@ void CodeGenerator::visitToHashableString(LToHashableString* ins) {
Address(input, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &isAtom);
- masm.lookupStringInAtomCacheLastLookups(input, output, ool->entry());
+ masm.lookupStringInAtomCacheLastLookups(input, output, output, ool->entry());
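+  // On a cache hit the atom is already in |output|, so skip the isAtom path.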
+ masm.jump(ool->rejoin());
masm.bind(&isAtom);
masm.movePtr(input, output);
masm.bind(ool->rejoin());
diff --git a/js/src/jit/Disassemble.cpp b/js/src/jit/Disassemble.cpp
index 652c381ce7..df768d4fd1 100644
--- a/js/src/jit/Disassemble.cpp
+++ b/js/src/jit/Disassemble.cpp
@@ -22,6 +22,8 @@
# include "jit/arm64/vixl/Instructions-vixl.h" // vixl::Instruction
# elif defined(JS_CODEGEN_ARM)
# include "jit/arm/disasm/Disasm-arm.h" // js::jit::disasm::*
+# elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/disasm/Disasm-riscv64.h" // js::jit::disasm::*
# endif
#endif
@@ -99,6 +101,31 @@ void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
}
}
+#elif defined(JS_JITSPEW) && defined(JS_CODEGEN_RISCV64)
+
+bool HasDisassembler() { return true; }
+
+void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
+ disasm::NameConverter converter;
+ disasm::Disassembler d(converter);
+
+ uint8_t* instr = code;
+ uint8_t* end = code + length;
+
+ while (instr < end) {
+ EmbeddedVector<char, ReasonableBufferSize> buffer;
+ buffer[0] = '\0';
+ uint8_t* next_instr = instr + d.InstructionDecode(buffer, instr);
+
+ JS::UniqueChars formatted =
+ JS_smprintf("0x%p %08x %s", instr, *reinterpret_cast<int32_t*>(instr),
+ buffer.start());
+ callback(formatted.get());
+
+ instr = next_instr;
+ }
+}
+
#else
bool HasDisassembler() { return false; }
diff --git a/js/src/jit/ExecutableAllocator.h b/js/src/jit/ExecutableAllocator.h
index 85c01562c3..02c8727e85 100644
--- a/js/src/jit/ExecutableAllocator.h
+++ b/js/src/jit/ExecutableAllocator.h
@@ -72,7 +72,8 @@ class ExecutablePool {
bool m_mark : 1;
// Number of bytes currently allocated for each CodeKind.
- mozilla::EnumeratedArray<CodeKind, CodeKind::Count, size_t> m_codeBytes;
+ mozilla::EnumeratedArray<CodeKind, size_t, size_t(CodeKind::Count)>
+ m_codeBytes;
public:
void release(bool willDestroy = false);
diff --git a/js/src/jit/GenerateAtomicOperations.py b/js/src/jit/GenerateAtomicOperations.py
index 8e37e5dcd6..9194b8b685 100644
--- a/js/src/jit/GenerateAtomicOperations.py
+++ b/js/src/jit/GenerateAtomicOperations.py
@@ -50,8 +50,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
# - MacroAssembler::wasmLoad
if cpu_arch in ("x86", "x86_64"):
insns = ""
- if barrier:
- insns += fmt_insn("mfence")
if size == 8:
insns += fmt_insn("movb (%[arg]), %[res]")
elif size == 16:
@@ -61,8 +59,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
else:
assert size == 64
insns += fmt_insn("movq (%[arg]), %[res]")
- if barrier:
- insns += fmt_insn("mfence")
return """
INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
%(cpp_type)s res;
@@ -78,8 +74,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
}
if cpu_arch == "aarch64":
insns = ""
- if barrier:
- insns += fmt_insn("dmb ish")
if size == 8:
insns += fmt_insn("ldrb %w[res], [%x[arg]]")
elif size == 16:
@@ -106,8 +100,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
}
if cpu_arch == "arm":
insns = ""
- if barrier:
- insns += fmt_insn("dmb sy")
if size == 8:
insns += fmt_insn("ldrb %[res], [%[arg]]")
elif size == 16:
@@ -141,8 +133,6 @@ def gen_store(fun_name, cpp_type, size, barrier):
# - MacroAssembler::wasmStore
if cpu_arch in ("x86", "x86_64"):
insns = ""
- if barrier:
- insns += fmt_insn("mfence")
if size == 8:
insns += fmt_insn("movb %[val], (%[addr])")
elif size == 16:
diff --git a/js/src/jit/GenerateCacheIRFiles.py b/js/src/jit/GenerateCacheIRFiles.py
index 5cecf82e64..d71c70b753 100644
--- a/js/src/jit/GenerateCacheIRFiles.py
+++ b/js/src/jit/GenerateCacheIRFiles.py
@@ -82,6 +82,7 @@ arg_writer_info = {
"BoolImm": ("bool", "writeBoolImm"),
"ByteImm": ("uint32_t", "writeByteImm"), # uint32_t to enable fits-in-byte asserts.
"GuardClassKindImm": ("GuardClassKind", "writeGuardClassKindImm"),
+ "ArrayBufferViewKindImm": ("ArrayBufferViewKind", "writeArrayBufferViewKindImm"),
"ValueTypeImm": ("ValueType", "writeValueTypeImm"),
"JSWhyMagicImm": ("JSWhyMagic", "writeJSWhyMagicImm"),
"CallFlagsImm": ("CallFlags", "writeCallFlagsImm"),
@@ -184,6 +185,11 @@ arg_reader_info = {
"BoolImm": ("bool", "", "reader.readBool()"),
"ByteImm": ("uint8_t", "", "reader.readByte()"),
"GuardClassKindImm": ("GuardClassKind", "", "reader.guardClassKind()"),
+ "ArrayBufferViewKindImm": (
+ "ArrayBufferViewKind",
+ "",
+ "reader.arrayBufferViewKind()",
+ ),
"ValueTypeImm": ("ValueType", "", "reader.valueType()"),
"JSWhyMagicImm": ("JSWhyMagic", "", "reader.whyMagic()"),
"CallFlagsImm": ("CallFlags", "", "reader.callFlags()"),
@@ -272,6 +278,7 @@ arg_spewer_method = {
"BoolImm": "spewBoolImm",
"ByteImm": "spewByteImm",
"GuardClassKindImm": "spewGuardClassKindImm",
+ "ArrayBufferViewKindImm": "spewArrayBufferViewKindImm",
"ValueTypeImm": "spewValueTypeImm",
"JSWhyMagicImm": "spewJSWhyMagicImm",
"CallFlagsImm": "spewCallFlagsImm",
@@ -415,6 +422,7 @@ arg_length = {
"JSOpImm": 1,
"ValueTypeImm": 1,
"GuardClassKindImm": 1,
+ "ArrayBufferViewKindImm": 1,
"JSWhyMagicImm": 1,
"WasmValTypeImm": 1,
"Int32Imm": 4,
diff --git a/js/src/jit/IonAnalysis.cpp b/js/src/jit/IonAnalysis.cpp
index a0c9a51c39..543ed0eb83 100644
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -747,13 +747,13 @@ static bool IsDiamondPattern(MBasicBlock* initialBlock) {
MTest* initialTest = ins->toTest();
MBasicBlock* trueBranch = initialTest->ifTrue();
- if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1) {
+ if (trueBranch->numPredecessors() != 1 || !trueBranch->lastIns()->isGoto()) {
return false;
}
MBasicBlock* falseBranch = initialTest->ifFalse();
if (falseBranch->numPredecessors() != 1 ||
- falseBranch->numSuccessors() != 1) {
+ !falseBranch->lastIns()->isGoto()) {
return false;
}
@@ -2228,6 +2228,7 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
phi->replaceOperand(i, in->toBox()->input());
} else {
MInstruction* replacement;
+ MBasicBlock* predecessor = phi->block()->getPredecessor(i);
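+      // Conversions must be inserted at the end of the predecessor block that
+      // feeds this phi operand, not in the operand's defining block.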
if (phiType == MIRType::Double && IsFloatType(in->type())) {
// Convert int32 operands to double.
@@ -2239,14 +2240,14 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
// See comment below
if (in->type() != MIRType::Value) {
MBox* box = MBox::New(alloc(), in);
- in->block()->insertBefore(in->block()->lastIns(), box);
+ predecessor->insertAtEnd(box);
in = box;
}
MUnbox* unbox =
MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
unbox->setBailoutKind(BailoutKind::SpeculativePhi);
- in->block()->insertBefore(in->block()->lastIns(), unbox);
+ predecessor->insertAtEnd(unbox);
replacement = MToFloat32::New(alloc(), in);
}
} else {
@@ -2255,7 +2256,7 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
// below.
if (in->type() != MIRType::Value) {
MBox* box = MBox::New(alloc(), in);
- in->block()->insertBefore(in->block()->lastIns(), box);
+ predecessor->insertAtEnd(box);
in = box;
}
@@ -2265,7 +2266,7 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
}
replacement->setBailoutKind(BailoutKind::SpeculativePhi);
- in->block()->insertBefore(in->block()->lastIns(), replacement);
+ predecessor->insertAtEnd(replacement);
phi->replaceOperand(i, replacement);
}
}
@@ -4452,6 +4453,10 @@ static bool NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use) {
if (use->type() == MIRType::BigInt) {
return true;
}
+ if (use->isLoadTypedArrayElementHole() &&
+ Scalar::isBigIntType(use->toLoadTypedArrayElementHole()->arrayType())) {
+ return true;
+ }
MBasicBlock* block = use->block();
MInstructionIterator iter(block->begin(slotsOrElements));
diff --git a/js/src/jit/IonOptimizationLevels.h b/js/src/jit/IonOptimizationLevels.h
index e68dfaa124..92e4586131 100644
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -181,8 +181,8 @@ class OptimizationInfo {
class OptimizationLevelInfo {
private:
- mozilla::EnumeratedArray<OptimizationLevel, OptimizationLevel::Count,
- OptimizationInfo>
+ mozilla::EnumeratedArray<OptimizationLevel, OptimizationInfo,
+ size_t(OptimizationLevel::Count)>
infos_;
public:
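This EnumeratedArray change, and the matching hunks later in JitRuntime.h and JitZone.h, all follow the same mozilla-central API update: the value type moved to the second template parameter and the array size became an explicit size_t. A minimal toy stand-in (not the real mozilla::EnumeratedArray) showing the new instantiation shape:

#include <array>
#include <cstddef>

enum class OptimizationLevel : size_t { Normal, Wasm, Count };

// Toy stand-in with the new parameter order <Enum, ValueType, Size>.
template <typename Enum, typename T, size_t Size>
class EnumeratedArraySketch {
  std::array<T, Size> storage_{};

 public:
  T& operator[](Enum e) { return storage_[static_cast<size_t>(e)]; }
};

// Matches the reordered instantiation used in the patch above.
EnumeratedArraySketch<OptimizationLevel, int,
                      size_t(OptimizationLevel::Count)>
    infos;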
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
index 7b3cb1184e..176b988e05 100644
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -20,7 +20,6 @@
#include "jit/JitRuntime.h"
#include "jit/JitSpewer.h"
#include "jit/LIR.h"
-#include "jit/PcScriptCache.h"
#include "jit/Recover.h"
#include "jit/Safepoints.h"
#include "jit/ScriptFromCalleeToken.h"
@@ -922,32 +921,32 @@ static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
return;
}
- size_t nargs = layout->numActualArgs();
- size_t nformals = 0;
-
JSFunction* fun = CalleeTokenToFunction(layout->calleeToken());
+
+ size_t numFormals = fun->nargs();
+ size_t numArgs = std::max(layout->numActualArgs(), numFormals);
+ size_t firstArg = 0;
+
if (frame.type() != FrameType::JSJitToWasm &&
!frame.isExitFrameLayout<CalledFromJitExitFrameLayout>() &&
!fun->nonLazyScript()->mayReadFrameArgsDirectly()) {
- nformals = fun->nargs();
+ firstArg = numFormals;
}
- size_t newTargetOffset = std::max(nargs, fun->nargs());
-
Value* argv = layout->thisAndActualArgs();
// Trace |this|.
TraceRoot(trc, argv, "ion-thisv");
- // Trace actual arguments beyond the formals. Note + 1 for thisv.
- for (size_t i = nformals + 1; i < nargs + 1; i++) {
- TraceRoot(trc, &argv[i], "ion-argv");
+ // Trace arguments. Note + 1 for thisv.
+ for (size_t i = firstArg; i < numArgs; i++) {
+ TraceRoot(trc, &argv[i + 1], "ion-argv");
}
// Always trace the new.target from the frame. It's not in the snapshots.
// +1 to pass |this|
if (CalleeTokenIsConstructing(layout->calleeToken())) {
- TraceRoot(trc, &argv[1 + newTargetOffset], "ion-newTarget");
+ TraceRoot(trc, &argv[1 + numArgs], "ion-newTarget");
}
}
@@ -1539,90 +1538,6 @@ JSScript* GetTopJitJSScript(JSContext* cx) {
return frame.script();
}
-void GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes) {
- JitSpew(JitSpew_IonSnapshots, "Recover PC & Script from the last frame.");
-
- // Recover the return address so that we can look it up in the
- // PcScriptCache, as script/pc computation is expensive.
- JitActivationIterator actIter(cx);
- OnlyJSJitFrameIter it(actIter);
- uint8_t* retAddr;
- if (it.frame().isExitFrame()) {
- ++it;
-
- // Skip baseline interpreter entry frames.
- // Can exist before rectifier frames.
- if (it.frame().isBaselineInterpreterEntry()) {
- ++it;
- }
-
- // Skip rectifier frames.
- if (it.frame().isRectifier()) {
- ++it;
- MOZ_ASSERT(it.frame().isBaselineStub() || it.frame().isBaselineJS() ||
- it.frame().isIonJS());
- }
-
- // Skip Baseline/Ion stub and IC call frames.
- if (it.frame().isBaselineStub()) {
- ++it;
- MOZ_ASSERT(it.frame().isBaselineJS());
- } else if (it.frame().isIonICCall()) {
- ++it;
- MOZ_ASSERT(it.frame().isIonJS());
- }
-
- MOZ_ASSERT(it.frame().isBaselineJS() || it.frame().isIonJS());
-
- // Don't use the return address and the cache if the BaselineFrame is
- // running in the Baseline Interpreter. In this case the bytecode pc is
- // cheap to get, so we won't benefit from the cache, and the return address
- // does not map to a single bytecode pc.
- if (it.frame().isBaselineJS() &&
- it.frame().baselineFrame()->runningInInterpreter()) {
- it.frame().baselineScriptAndPc(scriptRes, pcRes);
- return;
- }
-
- retAddr = it.frame().resumePCinCurrentFrame();
- } else {
- MOZ_ASSERT(it.frame().isBailoutJS());
- retAddr = it.frame().returnAddress();
- }
-
- MOZ_ASSERT(retAddr);
-
- uint32_t hash = PcScriptCache::Hash(retAddr);
-
- // Lazily initialize the cache. The allocation may safely fail and will not
- // GC.
- if (MOZ_UNLIKELY(cx->ionPcScriptCache == nullptr)) {
- cx->ionPcScriptCache =
- MakeUnique<PcScriptCache>(cx->runtime()->gc.gcNumber());
- }
-
- if (cx->ionPcScriptCache.ref() &&
- cx->ionPcScriptCache->get(cx->runtime(), hash, retAddr, scriptRes,
- pcRes)) {
- return;
- }
-
- // Lookup failed: undertake expensive process to determine script and pc.
- if (it.frame().isIonJS() || it.frame().isBailoutJS()) {
- InlineFrameIterator ifi(cx, &it.frame());
- *scriptRes = ifi.script();
- *pcRes = ifi.pc();
- } else {
- MOZ_ASSERT(it.frame().isBaselineJS());
- it.frame().baselineScriptAndPc(scriptRes, pcRes);
- }
-
- // Add entry to cache.
- if (cx->ionPcScriptCache.ref()) {
- cx->ionPcScriptCache->add(hash, retAddr, *pcRes, *scriptRes);
- }
-}
-
RInstructionResults::RInstructionResults(JitFrameLayout* fp)
: results_(nullptr), fp_(fp), initialized_(false) {}
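As a hedged reading of the rewritten TraceThisAndArguments hunk above: the frame layout it walks puts |this| at argv[0], the arguments (actuals padded up to the formal count) at argv[1..numArgs], and new.target at argv[1 + numArgs]. The loop below restates that indexing as stand-alone C++; Value and TraceRoot are stand-ins, not the SpiderMonkey types:

#include <algorithm>
#include <cstddef>

struct Value {};                               // Stand-in for JS::Value.
static void TraceRoot(Value*, const char*) {}  // Stand-in for the GC tracer.

void traceThisAndArgs(Value* argv, size_t numActualArgs, size_t numFormals,
                      size_t firstArg, bool constructing) {
  size_t numArgs = std::max(numActualArgs, numFormals);
  TraceRoot(&argv[0], "ion-thisv");            // argv[0] is |this|.
  for (size_t i = firstArg; i < numArgs; i++) {
    TraceRoot(&argv[i + 1], "ion-argv");       // +1 skips |this|.
  }
  if (constructing) {
    TraceRoot(&argv[1 + numArgs], "ion-newTarget");
  }
}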
diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h
index fe9b2942d3..ab882e7986 100644
--- a/js/src/jit/JitFrames.h
+++ b/js/src/jit/JitFrames.h
@@ -771,8 +771,6 @@ class InvalidationBailoutStack {
void checkInvariants() const;
};
-void GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes);
-
// Baseline requires one slot for this/argument type checks.
static const uint32_t MinJITStackSize = 1;
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
index f8cdbef8ba..e9d389cf60 100644
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -376,6 +376,10 @@ DefaultJitOptions::DefaultJitOptions() {
// ***** Irregexp shim flags *****
+ // Whether the stage 3 regexp modifiers proposal is enabled.
+ SET_DEFAULT(js_regexp_modifiers, false);
+ // Whether the stage 3 duplicate named capture groups proposal is enabled.
+ SET_DEFAULT(js_regexp_duplicate_named_groups, false);
// V8 uses this for differential fuzzing to handle stack overflows.
// We address the same problem in StackLimitCheck::HasOverflowed.
SET_DEFAULT(correctness_fuzzer_suppressions, false);
diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
index fd5a9726ed..d1fcae081c 100644
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -143,6 +143,8 @@ struct DefaultJitOptions {
// Irregexp shim flags
bool correctness_fuzzer_suppressions;
bool enable_regexp_unaligned_accesses;
+ bool js_regexp_modifiers;
+ bool js_regexp_duplicate_named_groups;
bool regexp_possessive_quantifier;
bool regexp_optimization;
bool regexp_peephole_optimization;
diff --git a/js/src/jit/JitRuntime.h b/js/src/jit/JitRuntime.h
index d0ce8422de..7d038ed0e2 100644
--- a/js/src/jit/JitRuntime.h
+++ b/js/src/jit/JitRuntime.h
@@ -75,15 +75,15 @@ enum class BailoutReturnKind {
class BaselineICFallbackCode {
JitCode* code_ = nullptr;
using OffsetArray =
- mozilla::EnumeratedArray<BaselineICFallbackKind,
- BaselineICFallbackKind::Count, uint32_t>;
+ mozilla::EnumeratedArray<BaselineICFallbackKind, uint32_t,
+ size_t(BaselineICFallbackKind::Count)>;
OffsetArray offsets_ = {};
// Keep track of offset into various baseline stubs' code at return
// point from called script.
using BailoutReturnArray =
- mozilla::EnumeratedArray<BailoutReturnKind, BailoutReturnKind::Count,
- uint32_t>;
+ mozilla::EnumeratedArray<BailoutReturnKind, uint32_t,
+ size_t(BailoutReturnKind::Count)>;
BailoutReturnArray bailoutReturnOffsets_ = {};
public:
@@ -175,13 +175,13 @@ class JitRuntime {
WriteOnceData<uint32_t> doubleToInt32ValueStubOffset_{0};
// Thunk to do a generic call from Ion.
- mozilla::EnumeratedArray<IonGenericCallKind, IonGenericCallKind::Count,
- WriteOnceData<uint32_t>>
+ mozilla::EnumeratedArray<IonGenericCallKind, WriteOnceData<uint32_t>,
+ size_t(IonGenericCallKind::Count)>
ionGenericCallStubOffset_;
// Thunk used by the debugger for breakpoint and step mode.
- mozilla::EnumeratedArray<DebugTrapHandlerKind, DebugTrapHandlerKind::Count,
- WriteOnceData<JitCode*>>
+ mozilla::EnumeratedArray<DebugTrapHandlerKind, WriteOnceData<JitCode*>,
+ size_t(DebugTrapHandlerKind::Count)>
debugTrapHandlers_;
// BaselineInterpreter state.
diff --git a/js/src/jit/JitScript.cpp b/js/src/jit/JitScript.cpp
index f2f6ee2c25..62a14a70b6 100644
--- a/js/src/jit/JitScript.cpp
+++ b/js/src/jit/JitScript.cpp
@@ -517,7 +517,13 @@ void ICScript::purgeStubs(Zone* zone, ICStubSpace& newStubSpace) {
if (fallback->trialInliningState() == TrialInliningState::Inlined &&
hasInlinedChild(fallback->pcOffset())) {
MOZ_ASSERT(active());
- MOZ_ASSERT(findInlinedChild(fallback->pcOffset())->active());
+#ifdef DEBUG
+ // The callee script must be active. Also assert its bytecode size field
+ // is valid, because this helps catch memory safety issues (bug 1871947).
+ ICScript* callee = findInlinedChild(fallback->pcOffset());
+ MOZ_ASSERT(callee->active());
+ MOZ_ASSERT(callee->bytecodeSize() < inliningRoot()->totalBytecodeSize());
+#endif
JSRuntime* rt = zone->runtimeFromMainThread();
ICCacheIRStub* prev = nullptr;
@@ -718,6 +724,9 @@ static void MarkActiveICScriptsAndCopyStubs(
ICCacheIRStub* newStub = stub->clone(cx->runtime(), newStubSpace);
layout->setStubPtr(newStub);
+ // If this is a trial-inlining call site, also preserve the callee
+ // ICScript. Inlined constructor calls invoke CreateThisFromIC (which
+ // can trigger GC) before using the inlined ICScript.
JSJitFrameIter parentFrame(frame);
++parentFrame;
BaselineFrame* blFrame = parentFrame.baselineFrame();
diff --git a/js/src/jit/JitSpewer.cpp b/js/src/jit/JitSpewer.cpp
index 6fcd25d6e3..11e3165240 100644
--- a/js/src/jit/JitSpewer.cpp
+++ b/js/src/jit/JitSpewer.cpp
@@ -369,7 +369,6 @@ static void PrintHelpAndExit(int status = 0) {
"compiled functions only).\n"
" profiling Profiling-related information\n"
" dump-mir-expr Dump the MIR expressions\n"
- " scriptstats Tracelogger summary stats\n"
" warp-snapshots WarpSnapshots created by WarpOracle\n"
" warp-transpiler Warp CacheIR transpiler\n"
" warp-trial-inlining Trial inlining for Warp\n"
@@ -475,8 +474,6 @@ void jit::CheckLogging() {
EnableChannel(JitSpew_Profiling);
} else if (IsFlag(found, "dump-mir-expr")) {
EnableChannel(JitSpew_MIRExpressions);
- } else if (IsFlag(found, "scriptstats")) {
- EnableChannel(JitSpew_ScriptStats);
} else if (IsFlag(found, "warp-snapshots")) {
EnableChannel(JitSpew_WarpSnapshots);
} else if (IsFlag(found, "warp-transpiler")) {
diff --git a/js/src/jit/JitSpewer.h b/js/src/jit/JitSpewer.h
index 2cc56d9cf7..bfc92c74f2 100644
--- a/js/src/jit/JitSpewer.h
+++ b/js/src/jit/JitSpewer.h
@@ -69,8 +69,6 @@ namespace jit {
_(MarkLoadsUsedAsPropertyKeys) \
/* Output a list of MIR expressions */ \
_(MIRExpressions) \
- /* Spew Tracelogger summary stats */ \
- _(ScriptStats) \
\
/* BASELINE COMPILER SPEW */ \
\
diff --git a/js/src/jit/JitZone.h b/js/src/jit/JitZone.h
index a17b73c20e..d4f2350b8d 100644
--- a/js/src/jit/JitZone.h
+++ b/js/src/jit/JitZone.h
@@ -141,7 +141,8 @@ class JitZone {
Count
};
- mozilla::EnumeratedArray<StubIndex, StubIndex::Count, WeakHeapPtr<JitCode*>>
+ mozilla::EnumeratedArray<StubIndex, WeakHeapPtr<JitCode*>,
+ size_t(StubIndex::Count)>
stubs_;
mozilla::Maybe<IonCompilationId> currentCompilationId_;
diff --git a/js/src/jit/LIROps.yaml b/js/src/jit/LIROps.yaml
index 44ef48a4d8..f13c4b0745 100644
--- a/js/src/jit/LIROps.yaml
+++ b/js/src/jit/LIROps.yaml
@@ -1875,6 +1875,49 @@
operands:
object: WordSized
+# Read the length of a resizable typed array.
+- name: ResizableTypedArrayLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+ arguments:
+ synchronization: js::jit::Synchronization
+ num_temps: 1
+
+# Read the possibly out-of-bounds byteOffset of a resizable typed array.
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBounds
+ result_type: WordSized
+ operands:
+ object: WordSized
+ num_temps: 1
+
+# Read the byte length of a resizable data view.
+- name: ResizableDataViewByteLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+ arguments:
+ synchronization: js::jit::Synchronization
+ num_temps: 1
+
+# Read the byte length of a growable shared array buffer.
+- name: GrowableSharedArrayBufferByteLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+# Guard a resizable array buffer view is in-bounds.
+- name: GuardResizableArrayBufferViewInBounds
+ operands:
+ object: WordSized
+ num_temps: 1
+
+# Guard a resizable array buffer view is in-bounds or detached.
+- name: GuardResizableArrayBufferViewInBoundsOrDetached
+ operands:
+ object: WordSized
+ num_temps: 1
+
- name: GuardHasAttachedArrayBuffer
operands:
object: WordSized
@@ -2052,9 +2095,9 @@
- name: LoadTypedArrayElementHole
result_type: BoxedValue
operands:
- object: WordSized
+ elements: WordSized
index: WordSized
- num_temps: 1
+ length: WordSized
mir_op: true
- name: LoadTypedArrayElementHoleBigInt
@@ -2941,6 +2984,11 @@
object: WordSized
num_temps: 1
+- name: GuardIsResizableTypedArray
+ operands:
+ object: WordSized
+ num_temps: 1
+
- name: GuardHasProxyHandler
operands:
object: WordSized
@@ -3069,6 +3117,13 @@
num_temps: 1
mir_op: true
+- name: GuardToEitherClass
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ num_temps: 1
+ mir_op: true
+
- name: GuardToFunction
result_type: WordSized
operands:
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index 8a28ea123c..b0007a114d 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3828,6 +3828,20 @@ void LIRGenerator::visitGetNextEntryForIterator(MGetNextEntryForIterator* ins) {
define(lir, ins);
}
+static auto SynchronizeLoad(MemoryBarrierRequirement requiresBarrier) {
+ if (requiresBarrier == MemoryBarrierRequirement::Required) {
+ return Synchronization::Load();
+ }
+ return Synchronization::None();
+}
+
+static auto SynchronizeStore(MemoryBarrierRequirement requiresBarrier) {
+ if (requiresBarrier == MemoryBarrierRequirement::Required) {
+ return Synchronization::Store();
+ }
+ return Synchronization::None();
+}
+
void LIRGenerator::visitArrayBufferByteLength(MArrayBufferByteLength* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
MOZ_ASSERT(ins->type() == MIRType::IntPtr);
@@ -3870,6 +3884,70 @@ void LIRGenerator::visitTypedArrayElementSize(MTypedArrayElementSize* ins) {
ins);
}
+void LIRGenerator::visitResizableTypedArrayLength(
+ MResizableTypedArrayLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto sync = SynchronizeLoad(ins->requiresMemoryBarrier());
+ auto* lir = new (alloc())
+ LResizableTypedArrayLength(useRegister(ins->object()), temp(), sync);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
+ MResizableTypedArrayByteOffsetMaybeOutOfBounds* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto* lir = new (alloc()) LResizableTypedArrayByteOffsetMaybeOutOfBounds(
+ useRegister(ins->object()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitResizableDataViewByteLength(
+ MResizableDataViewByteLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto sync = SynchronizeLoad(ins->requiresMemoryBarrier());
+ auto* lir = new (alloc())
+ LResizableDataViewByteLength(useRegister(ins->object()), temp(), sync);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGrowableSharedArrayBufferByteLength(
+ MGrowableSharedArrayBufferByteLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto* lir = new (alloc())
+ LGrowableSharedArrayBufferByteLength(useRegisterAtStart(ins->object()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGuardResizableArrayBufferViewInBounds(
+ MGuardResizableArrayBufferViewInBounds* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardResizableArrayBufferViewInBounds(
+ useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
+ MGuardResizableArrayBufferViewInBoundsOrDetached* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardResizableArrayBufferViewInBoundsOrDetached(
+ useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
void LIRGenerator::visitGuardHasAttachedArrayBuffer(
MGuardHasAttachedArrayBuffer* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
@@ -4298,8 +4376,9 @@ void LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) {
MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
MOZ_ASSERT(IsNumericType(ins->type()) || ins->type() == MIRType::Boolean);
- if (Scalar::isBigIntType(ins->storageType()) &&
- ins->requiresMemoryBarrier()) {
+ auto sync = SynchronizeLoad(ins->requiresMemoryBarrier());
+
+ if (Scalar::isBigIntType(ins->storageType()) && !sync.isNone()) {
lowerAtomicLoad64(ins);
return;
}
@@ -4310,8 +4389,7 @@ void LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) {
// NOTE: the generated code must match the assembly code in gen_load in
// GenerateAtomicOperations.py
- Synchronization sync = Synchronization::Load();
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierBefore);
add(fence, ins);
}
@@ -4338,7 +4416,7 @@ void LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) {
assignSafepoint(lir, ins);
}
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierAfter);
add(fence, ins);
}
@@ -4431,29 +4509,32 @@ void LIRGenerator::visitClampToUint8(MClampToUint8* ins) {
void LIRGenerator::visitLoadTypedArrayElementHole(
MLoadTypedArrayElementHole* ins) {
- MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->length()->type() == MIRType::IntPtr);
MOZ_ASSERT(ins->type() == MIRType::Value);
- const LUse object = useRegister(ins->object());
+ const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegister(ins->index());
+ const LAllocation length = useRegister(ins->length());
if (!Scalar::isBigIntType(ins->arrayType())) {
- auto* lir = new (alloc()) LLoadTypedArrayElementHole(object, index, temp());
+ auto* lir =
+ new (alloc()) LLoadTypedArrayElementHole(elements, index, length);
if (ins->fallible()) {
assignSnapshot(lir, ins->bailoutKind());
}
defineBox(lir, ins);
} else {
#ifdef JS_CODEGEN_X86
- LDefinition tmp = LDefinition::BogusTemp();
+ LInt64Definition temp64 = LInt64Definition::BogusTemp();
#else
- LDefinition tmp = temp();
+ LInt64Definition temp64 = tempInt64();
#endif
- auto* lir = new (alloc())
- LLoadTypedArrayElementHoleBigInt(object, index, tmp, tempInt64());
+ auto* lir = new (alloc()) LLoadTypedArrayElementHoleBigInt(
+ elements, index, length, temp(), temp64);
defineBox(lir, ins);
assignSafepoint(lir, ins);
}
@@ -4474,7 +4555,9 @@ void LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) {
MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
}
- if (ins->isBigIntWrite() && ins->requiresMemoryBarrier()) {
+ auto sync = SynchronizeStore(ins->requiresMemoryBarrier());
+
+ if (ins->isBigIntWrite() && !sync.isNone()) {
lowerAtomicStore64(ins);
return;
}
@@ -4500,8 +4583,7 @@ void LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) {
//
// NOTE: the generated code must match the assembly code in gen_store in
// GenerateAtomicOperations.py
- Synchronization sync = Synchronization::Store();
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierBefore);
add(fence, ins);
}
@@ -4511,7 +4593,7 @@ void LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) {
add(new (alloc()) LStoreUnboxedBigInt(elements, index, value, tempInt64()),
ins);
}
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierAfter);
add(fence, ins);
}
@@ -5154,6 +5236,17 @@ void LIRGenerator::visitGuardIsFixedLengthTypedArray(
redefine(ins, ins->object());
}
+void LIRGenerator::visitGuardIsResizableTypedArray(
+ MGuardIsResizableTypedArray* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LGuardIsResizableTypedArray(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
void LIRGenerator::visitGuardHasProxyHandler(MGuardHasProxyHandler* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
@@ -5694,6 +5787,15 @@ void LIRGenerator::visitGuardToClass(MGuardToClass* ins) {
defineReuseInput(lir, ins, 0);
}
+void LIRGenerator::visitGuardToEitherClass(MGuardToEitherClass* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ auto* lir = new (alloc())
+ LGuardToEitherClass(useRegisterAtStart(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+}
+
void LIRGenerator::visitGuardToFunction(MGuardToFunction* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
MOZ_ASSERT(ins->type() == MIRType::Object);
@@ -7018,6 +7120,11 @@ void LIRGenerator::visitMapObjectSize(MMapObjectSize* ins) {
define(lir, ins);
}
+void LIRGenerator::visitPostIntPtrConversion(MPostIntPtrConversion* ins) {
+ // This operation is a no-op.
+ redefine(ins, ins->input());
+}
+
void LIRGenerator::visitConstant(MConstant* ins) {
if (!IsFloatingPointType(ins->type()) && ins->canEmitAtUses()) {
emitAtUses(ins);
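The SynchronizeLoad/SynchronizeStore helpers introduced above centralize the "barrier only when required" decision for unboxed scalar accesses. The stand-alone sketch below (SyncSketch and MemoryBarrierRequirement are stand-ins, not the jit:: types) shows the fence placement the lowering of a barriered load corresponds to:

#include <atomic>

enum class MemoryBarrierRequirement : bool { NotRequired, Required };

struct SyncSketch {
  bool fenceBefore;
  bool fenceAfter;
  static SyncSketch Load() { return {true, true}; }
  static SyncSketch None() { return {false, false}; }
  bool isNone() const { return !fenceBefore && !fenceAfter; }
};

int loadScalar(const int* slot, MemoryBarrierRequirement req) {
  SyncSketch sync = req == MemoryBarrierRequirement::Required
                        ? SyncSketch::Load()
                        : SyncSketch::None();
  if (!sync.isNone()) std::atomic_thread_fence(std::memory_order_seq_cst);
  int value = *slot;  // The unboxed scalar load itself.
  if (!sync.isNone()) std::atomic_thread_fence(std::memory_order_seq_cst);
  return value;
}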
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
index dbaa73c9dd..c6daecb166 100644
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -6365,6 +6365,81 @@ AliasSet MGuardHasAttachedArrayBuffer::getAliasSet() const {
return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot);
}
+AliasSet MResizableTypedArrayByteOffsetMaybeOutOfBounds::getAliasSet() const {
+ // Loads the byteOffset and additionally checks for detached buffers, so the
+ // alias set also has to include |ObjectFields| and |FixedSlot|.
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot);
+}
+
+AliasSet MResizableTypedArrayLength::getAliasSet() const {
+ // Loads the length and byteOffset slots, the shared-elements flag, the
+ // auto-length fixed slot, and the shared raw-buffer length.
+ auto flags = AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::SharedArrayRawBufferLength;
+
+ // When a barrier is needed make the instruction effectful by giving it a
+ // "store" effect. Also prevent reordering LoadUnboxedScalar before this
+ // instruction by including |UnboxedElement| in the alias set.
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return AliasSet::Store(flags | AliasSet::UnboxedElement);
+ }
+ return AliasSet::Load(flags);
+}
+
+bool MResizableTypedArrayLength::congruentTo(const MDefinition* ins) const {
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MResizableDataViewByteLength::getAliasSet() const {
+ // Loads the length and byteOffset slots, the shared-elements flag, the
+ // auto-length fixed slot, and the shared raw-buffer length.
+ auto flags = AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::SharedArrayRawBufferLength;
+
+ // When a barrier is needed make the instruction effectful by giving it a
+ // "store" effect. Also prevent reordering LoadUnboxedScalar before this
+ // instruction by including |UnboxedElement| in the alias set.
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return AliasSet::Store(flags | AliasSet::UnboxedElement);
+ }
+ return AliasSet::Load(flags);
+}
+
+bool MResizableDataViewByteLength::congruentTo(const MDefinition* ins) const {
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGrowableSharedArrayBufferByteLength::getAliasSet() const {
+ // Requires a barrier, so make the instruction effectful by giving it a
+ // "store" effect. Also prevent reordering LoadUnboxedScalar before this
+ // instruction by including |UnboxedElement| in the alias set.
+ return AliasSet::Store(AliasSet::FixedSlot |
+ AliasSet::SharedArrayRawBufferLength |
+ AliasSet::UnboxedElement);
+}
+
+AliasSet MGuardResizableArrayBufferViewInBounds::getAliasSet() const {
+ // Additionally reads the |initialLength| and |initialByteOffset| slots, but
+ // since these can't change after construction, we don't need to track them.
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset);
+}
+
+AliasSet MGuardResizableArrayBufferViewInBoundsOrDetached::getAliasSet() const {
+ // Loads the byteOffset and additionally checks for detached buffers, so the
+ // alias set also has to include |ObjectFields| and |FixedSlot|.
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot);
+}
+
AliasSet MArrayPush::getAliasSet() const {
return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
}
@@ -6882,6 +6957,16 @@ MDefinition* MGuardToClass::foldsTo(TempAllocator& alloc) {
return object();
}
+MDefinition* MGuardToEitherClass::foldsTo(TempAllocator& alloc) {
+ const JSClass* clasp = GetObjectKnownJSClass(object());
+ if (!clasp || (getClass1() != clasp && getClass2() != clasp)) {
+ return this;
+ }
+
+ AssertKnownClass(alloc, this, object());
+ return object();
+}
+
MDefinition* MGuardToFunction::foldsTo(TempAllocator& alloc) {
if (GetObjectKnownClass(object()) != KnownClass::Function) {
return this;
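One way to read the new getAliasSet() overrides above: a barriered length read is deliberately reported as a Store over UnboxedElement so that alias analysis refuses to move typed-array loads and stores across it. A tiny flags sketch of that conflict rule, with stand-in names rather than the real AliasSet:

#include <cstdint>

// Stand-in alias-set flags; the real AliasSet has many more categories.
enum Flags : uint32_t {
  UnboxedElement = 1u << 0,
  FixedSlot = 1u << 1,
  Store_ = 1u << 31,
};

struct AliasSetSketch {
  uint32_t flags;
  bool isStore() const { return (flags & Store_) != 0; }
  static AliasSetSketch Load(uint32_t f) { return {f}; }
  static AliasSetSketch Store(uint32_t f) { return {f | Store_}; }
};

// Two sets conflict when they share a category and at least one is a store,
// so a Store(UnboxedElement | ...) length read pins typed-array accesses.
bool conflicts(AliasSetSketch a, AliasSetSketch b) {
  bool shared = ((a.flags & b.flags) & ~uint32_t(Store_)) != 0;
  return shared && (a.isStore() || b.isStore());
}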
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index 07701847eb..d882665a65 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -421,10 +421,13 @@ class AliasSet {
// The generation counter associated with the global object
GlobalGenerationCounter = 1 << 26,
- Last = GlobalGenerationCounter,
+ // The SharedArrayRawBuffer::length field.
+ SharedArrayRawBufferLength = 1 << 27,
+
+ Last = SharedArrayRawBufferLength,
Any = Last | (Last - 1),
- NumCategories = 27,
+ NumCategories = 28,
// Indicates load or store.
Store_ = 1 << 31
@@ -657,7 +660,13 @@ class MDefinition : public MNode {
virtual HashNumber valueHash() const;
virtual bool congruentTo(const MDefinition* ins) const { return false; }
const MDefinition* skipObjectGuards() const;
+
+  // Note that if a call to `congruentIfOperandsEqual(ins)` inside some class
+  // MFoo returns `true`, then `ins` is guaranteed to also be an MFoo, so it
+  // is safe to call `ins->toMFoo()` without first checking `ins->isMFoo()`.
bool congruentIfOperandsEqual(const MDefinition* ins) const;
+
virtual MDefinition* foldsTo(TempAllocator& alloc);
virtual void analyzeEdgeCasesForward();
virtual void analyzeEdgeCasesBackward();
@@ -1277,6 +1286,35 @@ class MVariadicT : public T {
// initializes the operands_ array and must be checked for OOM.
using MVariadicInstruction = MVariadicT<MInstruction>;
+// All barriered operations:
+// - MCompareExchangeTypedArrayElement
+// - MExchangeTypedArrayElement
+// - MAtomicTypedArrayElementBinop
+// - MGrowableSharedArrayBufferByteLength
+//
+// And operations which are optionally barriered:
+// - MLoadUnboxedScalar
+// - MStoreUnboxedScalar
+// - MResizableTypedArrayLength
+// - MResizableDataViewByteLength
+//
+// Must have the following attributes:
+//
+// - Not movable
+// - Not removable
+// - Not congruent with any other instruction
+// - Effectful (they alias every TypedArray store)
+//
+// The intended effect of those constraints is to prevent all loads and stores
+// preceding the barriered operation from being moved to after the barriered
+// operation, and vice versa, and to prevent the barriered operation from being
+// removed or hoisted.
+
+enum class MemoryBarrierRequirement : bool {
+ NotRequired,
+ Required,
+};
+
MIR_OPCODE_CLASS_GENERATED
// Truncation barrier. This is intended for protecting its input against
@@ -7040,44 +7078,22 @@ class MArrayPopShift : public MUnaryInstruction,
ALLOW_CLONE(MArrayPopShift)
};
-// All barriered operations - MCompareExchangeTypedArrayElement,
-// MExchangeTypedArrayElement, and MAtomicTypedArrayElementBinop, as
-// well as MLoadUnboxedScalar and MStoreUnboxedScalar when they are
-// marked as requiring a memory barrer - have the following
-// attributes:
-//
-// - Not movable
-// - Not removable
-// - Not congruent with any other instruction
-// - Effectful (they alias every TypedArray store)
-//
-// The intended effect of those constraints is to prevent all loads
-// and stores preceding the barriered operation from being moved to
-// after the barriered operation, and vice versa, and to prevent the
-// barriered operation from being removed or hoisted.
-
-enum MemoryBarrierRequirement {
- DoesNotRequireMemoryBarrier,
- DoesRequireMemoryBarrier
-};
-
-// Also see comments at MMemoryBarrierRequirement, above.
-
// Load an unboxed scalar value from an array buffer view or other object.
class MLoadUnboxedScalar : public MBinaryInstruction,
public NoTypePolicy::Data {
int32_t offsetAdjustment_ = 0;
Scalar::Type storageType_;
- bool requiresBarrier_;
+ MemoryBarrierRequirement requiresBarrier_;
- MLoadUnboxedScalar(
- MDefinition* elements, MDefinition* index, Scalar::Type storageType,
- MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+ MLoadUnboxedScalar(MDefinition* elements, MDefinition* index,
+ Scalar::Type storageType,
+ MemoryBarrierRequirement requiresBarrier =
+ MemoryBarrierRequirement::NotRequired)
: MBinaryInstruction(classOpcode, elements, index),
storageType_(storageType),
- requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier) {
+ requiresBarrier_(requiresBarrier) {
setResultType(MIRType::Value);
- if (requiresBarrier_) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
setGuard(); // Not removable or movable
} else {
setMovable();
@@ -7097,7 +7113,7 @@ class MLoadUnboxedScalar : public MBinaryInstruction,
// Bailout if the result does not fit in an int32.
return storageType_ == Scalar::Uint32 && type() == MIRType::Int32;
}
- bool requiresMemoryBarrier() const { return requiresBarrier_; }
+ auto requiresMemoryBarrier() const { return requiresBarrier_; }
int32_t offsetAdjustment() const { return offsetAdjustment_; }
void setOffsetAdjustment(int32_t offsetAdjustment) {
offsetAdjustment_ = offsetAdjustment;
@@ -7105,14 +7121,14 @@ class MLoadUnboxedScalar : public MBinaryInstruction,
AliasSet getAliasSet() const override {
// When a barrier is needed make the instruction effectful by
// giving it a "store" effect.
- if (requiresBarrier_) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
return AliasSet::Store(AliasSet::UnboxedElement);
}
return AliasSet::Load(AliasSet::UnboxedElement);
}
bool congruentTo(const MDefinition* ins) const override {
- if (requiresBarrier_) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
return false;
}
if (!ins->isLoadUnboxedScalar()) {
@@ -7198,26 +7214,29 @@ class MLoadDataViewElement : public MTernaryInstruction,
};
// Load a value from a typed array. Out-of-bounds accesses are handled in-line.
-class MLoadTypedArrayElementHole : public MBinaryInstruction,
- public SingleObjectPolicy::Data {
+class MLoadTypedArrayElementHole : public MTernaryInstruction,
+ public NoTypePolicy::Data {
Scalar::Type arrayType_;
bool forceDouble_;
- MLoadTypedArrayElementHole(MDefinition* object, MDefinition* index,
- Scalar::Type arrayType, bool forceDouble)
- : MBinaryInstruction(classOpcode, object, index),
+ MLoadTypedArrayElementHole(MDefinition* elements, MDefinition* index,
+ MDefinition* length, Scalar::Type arrayType,
+ bool forceDouble)
+ : MTernaryInstruction(classOpcode, elements, index, length),
arrayType_(arrayType),
forceDouble_(forceDouble) {
setResultType(MIRType::Value);
setMovable();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(length->type() == MIRType::IntPtr);
MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
}
public:
INSTRUCTION_HEADER(LoadTypedArrayElementHole)
TRIVIAL_NEW_WRAPPERS
- NAMED_OPERANDS((0, object), (1, index))
+ NAMED_OPERANDS((0, elements), (1, index), (2, length))
Scalar::Type arrayType() const { return arrayType_; }
bool forceDouble() const { return forceDouble_; }
@@ -7239,8 +7258,7 @@ class MLoadTypedArrayElementHole : public MBinaryInstruction,
return congruentIfOperandsEqual(other);
}
AliasSet getAliasSet() const override {
- return AliasSet::Load(AliasSet::UnboxedElement | AliasSet::ObjectFields |
- AliasSet::ArrayBufferViewLengthOrOffset);
+ return AliasSet::Load(AliasSet::UnboxedElement);
}
bool canProduceFloat32() const override {
return arrayType_ == Scalar::Float32;
@@ -7280,16 +7298,16 @@ class StoreUnboxedScalarBase {
class MStoreUnboxedScalar : public MTernaryInstruction,
public StoreUnboxedScalarBase,
public StoreUnboxedScalarPolicy::Data {
- bool requiresBarrier_;
+ MemoryBarrierRequirement requiresBarrier_;
- MStoreUnboxedScalar(
- MDefinition* elements, MDefinition* index, MDefinition* value,
- Scalar::Type storageType,
- MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+ MStoreUnboxedScalar(MDefinition* elements, MDefinition* index,
+ MDefinition* value, Scalar::Type storageType,
+ MemoryBarrierRequirement requiresBarrier =
+ MemoryBarrierRequirement::NotRequired)
: MTernaryInstruction(classOpcode, elements, index, value),
StoreUnboxedScalarBase(storageType),
- requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier) {
- if (requiresBarrier_) {
+ requiresBarrier_(requiresBarrier) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
setGuard(); // Not removable or movable
}
MOZ_ASSERT(elements->type() == MIRType::Elements);
@@ -7305,7 +7323,7 @@ class MStoreUnboxedScalar : public MTernaryInstruction,
AliasSet getAliasSet() const override {
return AliasSet::Store(AliasSet::UnboxedElement);
}
- bool requiresMemoryBarrier() const { return requiresBarrier_; }
+ auto requiresMemoryBarrier() const { return requiresBarrier_; }
TruncateKind operandTruncateKind(size_t index) const override;
bool canConsumeFloat32(MUse* use) const override {
@@ -8997,6 +9015,55 @@ class MGuardToClass : public MUnaryInstruction,
}
};
+class MGuardToEitherClass : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ const JSClass* class1_;
+ const JSClass* class2_;
+
+ MGuardToEitherClass(MDefinition* object, const JSClass* clasp1,
+ const JSClass* clasp2)
+ : MUnaryInstruction(classOpcode, object),
+ class1_(clasp1),
+ class2_(clasp2) {
+ MOZ_ASSERT(object->type() == MIRType::Object);
+ MOZ_ASSERT(clasp1 != clasp2, "Use MGuardToClass instead");
+ MOZ_ASSERT(!clasp1->isJSFunction(), "Use MGuardToFunction instead");
+ MOZ_ASSERT(!clasp2->isJSFunction(), "Use MGuardToFunction instead");
+ setResultType(MIRType::Object);
+ setMovable();
+
+ // We will bail out if the class type is incorrect, so we need to ensure we
+ // don't eliminate this instruction
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardToEitherClass)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const JSClass* getClass1() const { return class1_; }
+ const JSClass* getClass2() const { return class2_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardToEitherClass()) {
+ return false;
+ }
+ const auto* other = ins->toGuardToEitherClass();
+ if (getClass1() != other->getClass1() &&
+ getClass1() != other->getClass2()) {
+ return false;
+ }
+ if (getClass2() != other->getClass1() &&
+ getClass2() != other->getClass2()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
class MGuardToFunction : public MUnaryInstruction,
public SingleObjectPolicy::Data {
explicit MGuardToFunction(MDefinition* object)
@@ -9337,6 +9404,23 @@ class MObjectToIterator : public MUnaryInstruction,
void setWantsIndices(bool value) { wantsIndices_ = value; }
};
+class MPostIntPtrConversion : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ explicit MPostIntPtrConversion(MDefinition* input)
+ : MUnaryInstruction(classOpcode, input) {
+ // Passes through the input.
+ setResultType(input->type());
+
+ // Note: Must be non-movable so we can attach a resume point.
+ }
+
+ public:
+ INSTRUCTION_HEADER(PostIntPtrConversion)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
// Flips the input's sign bit, independently of the rest of the number's
// payload. Note this is different from multiplying by minus-one, which has
// side-effects for e.g. NaNs.
@@ -10808,6 +10892,8 @@ class MWasmReinterpret : public MUnaryInstruction, public NoTypePolicy::Data {
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
+ // No need to check type() here, because congruentIfOperandsEqual will
+ // check it.
return congruentIfOperandsEqual(ins);
}
@@ -10867,7 +10953,8 @@ class MWasmTernarySimd128 : public MTernaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ simdOp() == ins->toWasmTernarySimd128()->simdOp();
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
@@ -10908,8 +10995,8 @@ class MWasmBinarySimd128 : public MBinaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmBinarySimd128()->simdOp() == simdOp_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmBinarySimd128()->simdOp() == simdOp_;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
@@ -10945,8 +11032,8 @@ class MWasmBinarySimd128WithConstant : public MUnaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmBinarySimd128WithConstant()->simdOp() == simdOp_ &&
- congruentIfOperandsEqual(ins) &&
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmBinarySimd128WithConstant()->simdOp() == simdOp_ &&
rhs_.bitwiseEqual(ins->toWasmBinarySimd128WithConstant()->rhs());
}
@@ -10978,9 +11065,9 @@ class MWasmReplaceLaneSimd128 : public MBinaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmReplaceLaneSimd128()->simdOp() == simdOp_ &&
- ins->toWasmReplaceLaneSimd128()->laneIndex() == laneIndex_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmReplaceLaneSimd128()->simdOp() == simdOp_ &&
+ ins->toWasmReplaceLaneSimd128()->laneIndex() == laneIndex_;
}
uint32_t laneIndex() const { return laneIndex_; }
@@ -11006,8 +11093,8 @@ class MWasmScalarToSimd128 : public MUnaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmScalarToSimd128()->simdOp() == simdOp_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmScalarToSimd128()->simdOp() == simdOp_;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
@@ -11036,9 +11123,9 @@ class MWasmReduceSimd128 : public MUnaryInstruction, public NoTypePolicy::Data {
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmReduceSimd128()->simdOp() == simdOp_ &&
- ins->toWasmReduceSimd128()->imm() == imm_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmReduceSimd128()->simdOp() == simdOp_ &&
+ ins->toWasmReduceSimd128()->imm() == imm_;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
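The switch above from the old two-value plain enum to enum class MemoryBarrierRequirement : bool removes the implicit integer conversions, which is why all the call sites now compare against ::Required explicitly. A quick stand-alone illustration of the difference:

// Old style: unscoped enumerators convert implicitly, so a stray truthiness
// check like `if (requiresBarrier)` silently compiles.
enum MemoryBarrierRequirementOld {
  DoesNotRequireMemoryBarrier,
  DoesRequireMemoryBarrier
};

// New style: the comparison must be spelled out, as the updated
// MLoadUnboxedScalar/MStoreUnboxedScalar code does.
enum class MemoryBarrierRequirement : bool { NotRequired, Required };

bool needsFence(MemoryBarrierRequirement req) {
  return req == MemoryBarrierRequirement::Required;
}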
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
index 565c3c9c2b..7f0df52742 100644
--- a/js/src/jit/MIROps.yaml
+++ b/js/src/jit/MIROps.yaml
@@ -1331,7 +1331,6 @@
folds_to: custom
congruent_to: if_operands_equal
alias_set: none
- movable: true
can_recover: true
- name: ModuleMetadata
@@ -1492,6 +1491,54 @@
alias_set: custom
clone: true
+# Implements the TypedArrayByteOffset intrinsic for resizable typed arrays,
+# which calls TypedArrayObject::byteOffsetMaybeOutOfBounds().
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBounds
+ operands:
+ object: Object
+ result_type: IntPtr
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ compute_range: custom
+
+# Read the length of a resizable typed array.
+- name: ResizableTypedArrayLength
+ operands:
+ object: Object
+ arguments:
+ requiresMemoryBarrier: MemoryBarrierRequirement
+ result_type: IntPtr
+ # Not removable or movable when a barrier is needed.
+ guard: true
+ movable: false
+ congruent_to: custom
+ alias_set: custom
+ compute_range: custom
+
+# Read the byteLength of a resizable dataview.
+- name: ResizableDataViewByteLength
+ operands:
+ object: Object
+ arguments:
+ requiresMemoryBarrier: MemoryBarrierRequirement
+ result_type: IntPtr
+ # Not removable or movable when a barrier is needed.
+ guard: true
+ movable: false
+ congruent_to: custom
+ alias_set: custom
+ compute_range: custom
+
+# Read the byte length of a growable shared array buffer as IntPtr.
+- name: GrowableSharedArrayBufferByteLength
+ operands:
+ object: Object
+ result_type: IntPtr
+ guard: true
+ movable: false
+ alias_set: custom
+
# Return the element size of a typed array.
- name: TypedArrayElementSize
operands:
@@ -1513,6 +1560,26 @@
congruent_to: if_operands_equal
alias_set: custom
+# Guard a resizable typed array is in-bounds.
+- name: GuardResizableArrayBufferViewInBounds
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Guard a resizable typed array is in-bounds or detached.
+- name: GuardResizableArrayBufferViewInBoundsOrDetached
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
- name: GuardNumberToIntPtrIndex
gen_boilerplate: false
@@ -1951,6 +2018,15 @@
congruent_to: if_operands_equal
alias_set: none
+- name: GuardIsResizableTypedArray
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
- name: GuardHasProxyHandler
operands:
object: Object
@@ -2510,6 +2586,9 @@
- name: GuardToClass
gen_boilerplate: false
+- name: GuardToEitherClass
+ gen_boilerplate: false
+
- name: GuardToFunction
gen_boilerplate: false
@@ -3106,6 +3185,9 @@
congruent_to: if_operands_equal
alias_set: custom
+- name: PostIntPtrConversion
+ gen_boilerplate: false
+
- name: WasmNeg
gen_boilerplate: false
diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h
index beba576a22..e1df31eff9 100644
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -606,9 +606,7 @@ void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
MOZ_ASSERT(obj != scratch);
MOZ_ASSERT(scratch != spectreRegToZero);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchPtr(cond, clasp, scratch, label);
if (JitOptions.spectreObjectMitigations) {
@@ -620,9 +618,7 @@ void MacroAssembler::branchTestObjClassNoSpectreMitigations(
Condition cond, Register obj, const Address& clasp, Register scratch,
Label* label) {
MOZ_ASSERT(obj != scratch);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchPtr(cond, clasp, scratch, label);
}
@@ -633,9 +629,7 @@ void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
MOZ_ASSERT(obj != scratch);
MOZ_ASSERT(scratch != spectreRegToZero);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchPtr(cond, clasp, scratch, label);
if (JitOptions.spectreObjectMitigations) {
@@ -643,20 +637,51 @@ void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
}
}
-void MacroAssembler::branchTestClassIsFunction(Condition cond, Register clasp,
- Label* label) {
+void MacroAssembler::branchTestClass(
+ Condition cond, Register clasp,
+ std::pair<const JSClass*, const JSClass*> classes, Label* label) {
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
if (cond == Assembler::Equal) {
- branchPtr(Assembler::Equal, clasp, ImmPtr(&FunctionClass), label);
- branchPtr(Assembler::Equal, clasp, ImmPtr(&ExtendedFunctionClass), label);
+ branchPtr(Assembler::Equal, clasp, ImmPtr(classes.first), label);
+ branchPtr(Assembler::Equal, clasp, ImmPtr(classes.second), label);
return;
}
- Label isFunction;
- branchPtr(Assembler::Equal, clasp, ImmPtr(&FunctionClass), &isFunction);
- branchPtr(Assembler::NotEqual, clasp, ImmPtr(&ExtendedFunctionClass), label);
- bind(&isFunction);
+ Label isClass;
+ branchPtr(Assembler::Equal, clasp, ImmPtr(classes.first), &isClass);
+ branchPtr(Assembler::NotEqual, clasp, ImmPtr(classes.second), label);
+ bind(&isClass);
+}
+
+void MacroAssembler::branchTestObjClass(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Register spectreRegToZero, Label* label) {
+ MOZ_ASSERT(scratch != spectreRegToZero);
+
+ branchTestObjClassNoSpectreMitigations(cond, obj, classes, scratch, label);
+
+ if (JitOptions.spectreObjectMitigations) {
+ spectreZeroRegister(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestObjClassNoSpectreMitigations(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(obj != scratch);
+
+ loadObjClassUnsafe(obj, scratch);
+ branchTestClass(cond, scratch, classes, label);
+}
+
+void MacroAssembler::branchTestClassIsFunction(Condition cond, Register clasp,
+ Label* label) {
+ return branchTestClass(cond, clasp, {&FunctionClass, &ExtendedFunctionClass},
+ label);
}
void MacroAssembler::branchTestObjIsFunction(Condition cond, Register obj,
@@ -677,9 +702,7 @@ void MacroAssembler::branchTestObjIsFunctionNoSpectreMitigations(
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
MOZ_ASSERT(obj != scratch);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchTestClassIsFunction(cond, scratch, label);
}
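The new branchTestClass overload above takes a std::pair of classes; for the Equal condition it jumps when the class matches either entry, and for NotEqual it jumps only when it matches neither. Restated as plain C++ (a sketch of the predicate, not the emitted assembly):

struct JSClass {};  // Stand-in for the real JSClass.

// Equal case: take the branch when this returns true.
// NotEqual case: take the branch when this returns false.
bool matchesEither(const JSClass* clasp, const JSClass* c1,
                   const JSClass* c2) {
  return clasp == c1 || clasp == c2;
}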
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 54da676014..3b094d49dc 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -2661,6 +2661,7 @@ void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
void MacroAssembler::lookupStringInAtomCacheLastLookups(Register str,
Register scratch,
+ Register output,
Label* fail) {
Label found;
@@ -2680,7 +2681,7 @@ void MacroAssembler::lookupStringInAtomCacheLastLookups(Register str,
// and jump back up to our usual atom handling code
bind(&found);
size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
- loadPtr(Address(scratch, atomOffset), str);
+ loadPtr(Address(scratch, atomOffset), output);
}
void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
@@ -2740,7 +2741,7 @@ void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
loadAtomHash(outId, outHash, &done);
bind(&nonAtom);
- lookupStringInAtomCacheLastLookups(outId, outHash, cacheMiss);
+ lookupStringInAtomCacheLastLookups(outId, outHash, outId, cacheMiss);
jump(&atom);
bind(&done);
@@ -3255,6 +3256,95 @@ void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
loadPrivate(slotAddr, output);
}
+void MacroAssembler::loadGrowableSharedArrayBufferByteLengthIntPtr(
+ Synchronization sync, Register obj, Register output) {
+ // Load the SharedArrayRawBuffer.
+ loadPrivate(Address(obj, SharedArrayBufferObject::rawBufferOffset()), output);
+
+ memoryBarrierBefore(sync);
+
+ // Load the byteLength of the SharedArrayRawBuffer into |output|.
+ static_assert(sizeof(mozilla::Atomic<size_t>) == sizeof(size_t));
+ loadPtr(Address(output, SharedArrayRawBuffer::offsetOfByteLength()), output);
+
+ memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::loadResizableArrayBufferViewLengthIntPtr(
+ ResizableArrayBufferView view, Synchronization sync, Register obj,
+ Register output, Register scratch) {
+ // Inline implementation of ArrayBufferViewObject::length(), when the input is
+ // guaranteed to be a resizable arraybuffer view object.
+
+ loadArrayBufferViewLengthIntPtr(obj, output);
+
+ Label done;
+ branchPtr(Assembler::NotEqual, output, ImmWord(0), &done);
+
+ // Load obj->elements in |scratch|.
+ loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ // If backed by non-shared memory, detached and out-of-bounds both return
+ // zero, so we're done here.
+ branchTest32(Assembler::Zero,
+ Address(scratch, ObjectElements::offsetOfFlags()),
+ Imm32(ObjectElements::SHARED_MEMORY), &done);
+
+ // Load the auto-length slot.
+ unboxBoolean(Address(obj, ArrayBufferViewObject::autoLengthOffset()),
+ scratch);
+
+ // If non-auto length, there's nothing to do.
+ branchTest32(Assembler::Zero, scratch, scratch, &done);
+
+ // Load bufferByteLength into |output|.
+ {
+ // Resizable TypedArrays are guaranteed to have an ArrayBuffer.
+ unboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), output);
+
+ // Load the byte length from the raw-buffer of growable SharedArrayBuffers.
+ loadGrowableSharedArrayBufferByteLengthIntPtr(sync, output, output);
+ }
+
+ // Load the byteOffset into |scratch|.
+ loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
+
+ // Compute the accessible byte length |bufferByteLength - byteOffset|.
+ subPtr(scratch, output);
+
+ if (view == ResizableArrayBufferView::TypedArray) {
+ // Compute the array length from the byte length.
+ resizableTypedArrayElementShiftBy(obj, output, scratch);
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(
+ Register obj, Register output, Register scratch) {
+ // Inline implementation of TypedArrayObject::byteOffsetMaybeOutOfBounds(),
+ // when the input is guaranteed to be a resizable typed array object.
+
+ loadArrayBufferViewByteOffsetIntPtr(obj, output);
+
+ // TypedArray is neither detached nor out-of-bounds when byteOffset non-zero.
+ Label done;
+ branchPtr(Assembler::NotEqual, output, ImmWord(0), &done);
+
+ // We're done when the initial byteOffset is zero.
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialByteOffsetOffset()),
+ output);
+ branchPtr(Assembler::Equal, output, ImmWord(0), &done);
+
+ // If the buffer is attached, return initialByteOffset.
+ branchIfHasAttachedArrayBuffer(obj, scratch, &done);
+
+ // Otherwise return zero to match the result for fixed-length TypedArrays.
+ movePtr(ImmWord(0), output);
+
+ bind(&done);
+}
+
void MacroAssembler::loadDOMExpandoValueGuardGeneration(
Register obj, ValueOperand output,
JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
@@ -7433,11 +7523,11 @@ void MacroAssembler::debugAssertCanonicalInt32(Register r) {
}
#endif
-void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
+void MacroAssembler::memoryBarrierBefore(Synchronization sync) {
memoryBarrier(sync.barrierBefore);
}
-void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
+void MacroAssembler::memoryBarrierAfter(Synchronization sync) {
memoryBarrier(sync.barrierAfter);
}
@@ -7834,6 +7924,74 @@ void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
bind(&done);
}
+void MacroAssembler::resizableTypedArrayElementShiftBy(Register obj,
+ Register output,
+ Register scratch) {
+ loadObjClassUnsafe(obj, scratch);
+
+#ifdef DEBUG
+ Label invalidClass, validClass;
+ branchPtr(Assembler::Below, scratch,
+ ImmPtr(std::begin(TypedArrayObject::resizableClasses)),
+ &invalidClass);
+ branchPtr(Assembler::Below, scratch,
+ ImmPtr(std::end(TypedArrayObject::resizableClasses)), &validClass);
+ bind(&invalidClass);
+ assumeUnreachable("value isn't a valid ResizableLengthTypedArray class");
+ bind(&validClass);
+#endif
+
+ auto classForType = [](Scalar::Type type) {
+ MOZ_ASSERT(type < Scalar::MaxTypedArrayViewType);
+ return &TypedArrayObject::resizableClasses[type];
+ };
+
+ Label zero, one, two, three;
+
+ static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
+ "element shift is zero in [Int8, Int16)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::Int16)),
+ &zero);
+
+ static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
+ "element shift is one in [Int16, Int32)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::Int32)),
+ &one);
+
+ static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
+ "element shift is two in [Int32, Float64)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::Float64)),
+ &two);
+
+ static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
+ "element shift is three in [Float64, Uint8Clamped)");
+ branchPtr(Assembler::Below, scratch,
+ ImmPtr(classForType(Scalar::Uint8Clamped)), &three);
+
+ static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
+ "element shift is zero in [Uint8Clamped, BigInt64)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::BigInt64)),
+ &zero);
+
+ static_assert(
+ ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
+ "element shift is three in [BigInt64, MaxTypedArrayViewType)");
+ // Fall through for BigInt64 and BigUint64
+
+ bind(&three);
+ rshiftPtr(Imm32(3), output);
+ jump(&zero);
+
+ bind(&two);
+ rshiftPtr(Imm32(2), output);
+ jump(&zero);
+
+ bind(&one);
+ rshiftPtr(Imm32(1), output);
+
+ bind(&zero);
+}
+
void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
Label* notTypedArray) {
// Inline implementation of IsTypedArrayClass().
@@ -7867,28 +8025,103 @@ void MacroAssembler::branchIfClassIsNotFixedLengthTypedArray(
notTypedArray);
}
-void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
+void MacroAssembler::branchIfClassIsNotResizableTypedArray(
+ Register clasp, Label* notTypedArray) {
+ // Inline implementation of IsResizableTypedArrayClass().
+
+ const auto* firstTypedArrayClass =
+ std::begin(TypedArrayObject::resizableClasses);
+ const auto* lastTypedArrayClass =
+ std::prev(std::end(TypedArrayObject::resizableClasses));
+
+ branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
+ notTypedArray);
+ branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
+ notTypedArray);
+}
+
+void MacroAssembler::branchIfHasDetachedArrayBuffer(BranchIfDetached branchIf,
+ Register obj, Register temp,
Label* label) {
// Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().
+ // TODO: The data-slot of detached views is set to undefined, which would be
+ // a faster way to detect detached buffers.
+
+ // auto cond = branchIf == BranchIfDetached::Yes ? Assembler::Equal
+ // : Assembler::NotEqual;
+ // branchTestUndefined(cond, Address(obj,
+ // ArrayBufferViewObject::dataOffset()), label);
+
+ Label done;
+ Label* ifNotDetached = branchIf == BranchIfDetached::Yes ? &done : label;
+ Condition detachedCond =
+ branchIf == BranchIfDetached::Yes ? Assembler::NonZero : Assembler::Zero;
+
// Load obj->elements in temp.
loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
// Shared buffers can't be detached.
- Label done;
branchTest32(Assembler::NonZero,
Address(temp, ObjectElements::offsetOfFlags()),
- Imm32(ObjectElements::SHARED_MEMORY), &done);
+ Imm32(ObjectElements::SHARED_MEMORY), ifNotDetached);
// An ArrayBufferView with a null/true buffer has never had its buffer
// exposed, so nothing can possibly detach it.
fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
- &done);
+ ifNotDetached);
- // Load the ArrayBuffer flags and branch if the detached flag is set.
+ // Load the ArrayBuffer flags and branch if the detached flag is (not) set.
unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
- branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
- label);
+ branchTest32(detachedCond, temp, Imm32(ArrayBufferObject::DETACHED), label);
+
+ if (branchIf == BranchIfDetached::Yes) {
+ bind(&done);
+ }
+}
+
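The detached-buffer helper now serves both polarities through BranchIfDetached. A sketch of the predicate each instantiation branches on, derived from the shared-memory, null/true-buffer, and DETACHED-flag tests above (names are illustrative, not real accessors):

    // The branch to |label| is taken iff detached(view) == (branchIf == Yes), where
    //   detached(view) = !isSharedMemory(view) &&
    //                    bufferSlotIsObject(view) &&   // null/true slot: never exposed
    //                    (bufferFlags(view) & ArrayBufferObject::DETACHED) != 0
    // For BranchIfDetached::No the shared-memory and never-exposed cases jump
    // straight to |label| (attached); for ::Yes they fall through to the local
    // |done| label instead.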
+void MacroAssembler::branchIfResizableArrayBufferViewOutOfBounds(Register obj,
+ Register temp,
+ Label* label) {
+ // Implementation of ArrayBufferViewObject::isOutOfBounds().
+
+ Label done;
+
+ loadArrayBufferViewLengthIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), &done);
+
+ loadArrayBufferViewByteOffsetIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), &done);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialLengthOffset()), temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialByteOffsetOffset()),
+ temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchIfResizableArrayBufferViewInBounds(Register obj,
+ Register temp,
+ Label* label) {
+ // Implementation of ArrayBufferViewObject::isOutOfBounds().
+
+ Label done;
+
+ loadArrayBufferViewLengthIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ loadArrayBufferViewByteOffsetIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialLengthOffset()), temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), &done);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialByteOffsetOffset()),
+ temp);
+ branchPtr(Assembler::Equal, temp, ImmWord(0), label);
bind(&done);
}
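Read together, the two helpers test the same four values with opposite goals. A C++-level sketch of the predicates they implement, assuming the length and byteOffset loads report 0 once a resizable view has gone out of bounds:

    // isOutOfBounds(): the view currently reports no length and no byte offset,
    // yet it was created with a non-zero initial length or initial byte offset.
    bool outOfBounds = length == 0 && byteOffset == 0 &&
                       (initialLength != 0 || initialByteOffset != 0);

    // isInBounds(): any non-zero current length or byte offset, or a view whose
    // initial length and initial byte offset are both zero.
    bool inBounds = length != 0 || byteOffset != 0 ||
                    (initialLength == 0 && initialByteOffset == 0);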
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 43974a6ccc..361de3ac5f 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -13,6 +13,8 @@
#include "mozilla/Maybe.h"
#include "mozilla/Variant.h"
+#include <utility>
+
#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86.h"
#elif defined(JS_CODEGEN_X64)
@@ -1784,6 +1786,21 @@ class MacroAssembler : public MacroAssemblerSpecific {
Register scratch, Register spectreRegToZero,
Label* label);
+ private:
+ inline void branchTestClass(Condition cond, Register clasp,
+ std::pair<const JSClass*, const JSClass*> classes,
+ Label* label);
+
+ public:
+ inline void branchTestObjClass(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Register spectreRegToZero, Label* label);
+ inline void branchTestObjClassNoSpectreMitigations(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Label* label);
+
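The std::pair overloads let a single class check accept either of two classes (lowered through MGuardToEitherClass in the transpiler further down). A hypothetical call, assuming the fixed-length and resizable DataView classes as the pair:

    Label bail;
    masm.branchTestObjClass(Assembler::NotEqual, obj,
                            {&FixedLengthDataViewObject::class_,
                             &ResizableDataViewObject::class_},
                            scratch, spectreRegToZero, &bail);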
inline void branchTestObjShape(Condition cond, Register obj,
const Shape* shape, Register scratch,
Register spectreRegToZero, Label* label);
@@ -4191,23 +4208,23 @@ class MacroAssembler : public MacroAssemblerSpecific {
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations.
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
@@ -4218,12 +4235,12 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM: Registers must be distinct; `replacement` and `output` must be
// (even,odd) pairs.
- void compareExchange64(const Synchronization& sync, const Address& mem,
+ void compareExchange64(Synchronization sync, const Address& mem,
Register64 expected, Register64 replacement,
Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
- void compareExchange64(const Synchronization& sync, const BaseIndex& mem,
+ void compareExchange64(Synchronization sync, const BaseIndex& mem,
Register64 expected, Register64 replacement,
Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
@@ -4232,20 +4249,20 @@ class MacroAssembler : public MacroAssemblerSpecific {
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations.
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared, loong64, riscv64);
@@ -4254,11 +4271,11 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM: `value` and `output` must be distinct and (even,odd) pairs.
// ARM64: `value` and `output` must be distinct.
- void atomicExchange64(const Synchronization& sync, const Address& mem,
+ void atomicExchange64(Synchronization sync, const Address& mem,
Register64 value, Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
- void atomicExchange64(const Synchronization& sync, const BaseIndex& mem,
+ void atomicExchange64(Synchronization sync, const BaseIndex& mem,
Register64 value, Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
@@ -4275,33 +4292,31 @@ class MacroAssembler : public MacroAssemblerSpecific {
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations; `value` and `output` must differ.
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const Address& mem,
- Register temp, Register output)
- DEFINED_ON(arm, arm64, x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const Address& mem, Register temp,
+ Register output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Imm32 value, const Address& mem,
- Register temp, Register output) DEFINED_ON(x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Imm32 value, const Address& mem, Register temp,
+ Register output) DEFINED_ON(x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const BaseIndex& mem,
- Register temp, Register output)
- DEFINED_ON(arm, arm64, x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const BaseIndex& mem, Register temp,
+ Register output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Imm32 value, const BaseIndex& mem,
- Register temp, Register output) DEFINED_ON(x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Imm32 value, const BaseIndex& mem, Register temp,
+ Register output) DEFINED_ON(x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const Address& mem,
- Register valueTemp, Register offsetTemp, Register maskTemp,
- Register output) DEFINED_ON(mips_shared, loong64, riscv64);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp, Register maskTemp,
- Register output) DEFINED_ON(mips_shared, loong64, riscv64);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
// x86:
// `temp` must be ecx:ebx; `output` must be edx:eax.
@@ -4313,23 +4328,21 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM64:
// Registers `value`, `temp`, and `output` must all differ.
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const Address& mem, Register64 temp,
- Register64 output)
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp, Register64 output)
DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- const Address& value, const Address& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
+ const Address& mem, Register64 temp, Register64 output)
+ DEFINED_ON(x86);
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const BaseIndex& mem, Register64 temp,
- Register64 output)
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp, Register64 output)
DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- const Address& value, const BaseIndex& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
+ const BaseIndex& mem, Register64 temp, Register64 output)
+ DEFINED_ON(x86);
// x64:
// `value` can be any register.
@@ -4338,18 +4351,18 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM64:
// Registers `value` and `temp` must differ.
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const Address& mem) DEFINED_ON(x64);
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const Address& mem) DEFINED_ON(x64);
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const Address& mem, Register64 temp)
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp)
DEFINED_ON(arm, arm64, mips64, loong64, riscv64);
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const BaseIndex& mem) DEFINED_ON(x64);
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const BaseIndex& mem) DEFINED_ON(x64);
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const BaseIndex& mem, Register64 temp)
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp)
DEFINED_ON(arm, arm64, mips64, loong64, riscv64);
// 64-bit atomic load. On 64-bit systems, use regular load with
@@ -4358,16 +4371,16 @@ class MacroAssembler : public MacroAssemblerSpecific {
// x86: `temp` must be ecx:ebx; `output` must be edx:eax.
// ARM: `output` must be (even,odd) pair.
- void atomicLoad64(const Synchronization& sync, const Address& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicLoad64(Synchronization sync, const Address& mem, Register64 temp,
+ Register64 output) DEFINED_ON(x86);
- void atomicLoad64(const Synchronization& sync, const BaseIndex& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicLoad64(Synchronization sync, const BaseIndex& mem, Register64 temp,
+ Register64 output) DEFINED_ON(x86);
- void atomicLoad64(const Synchronization& sync, const Address& mem,
- Register64 output) DEFINED_ON(arm);
+ void atomicLoad64(Synchronization sync, const Address& mem, Register64 output)
+ DEFINED_ON(arm);
- void atomicLoad64(const Synchronization& sync, const BaseIndex& mem,
+ void atomicLoad64(Synchronization sync, const BaseIndex& mem,
Register64 output) DEFINED_ON(arm);
// 64-bit atomic store. On 64-bit systems, use regular store with
@@ -4376,10 +4389,10 @@ class MacroAssembler : public MacroAssemblerSpecific {
// x86: `value` must be ecx:ebx; `temp` must be edx:eax.
// ARM: `value` and `temp` must be (even,odd) pairs.
- void atomicStore64(const Synchronization& sync, const Address& mem,
- Register64 value, Register64 temp) DEFINED_ON(x86, arm);
+ void atomicStore64(Synchronization sync, const Address& mem, Register64 value,
+ Register64 temp) DEFINED_ON(x86, arm);
- void atomicStore64(const Synchronization& sync, const BaseIndex& mem,
+ void atomicStore64(Synchronization sync, const BaseIndex& mem,
Register64 value, Register64 temp) DEFINED_ON(x86, arm);
// ========================================================================
@@ -4594,105 +4607,105 @@ class MacroAssembler : public MacroAssemblerSpecific {
// For additional register constraints, see the primitive 32-bit operations
// and/or wasm operations above.
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register value, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register value, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const Address& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register temp) DEFINED_ON(arm, arm64, x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register temp) DEFINED_ON(arm, arm64, x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const Address& mem,
Register temp) DEFINED_ON(x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const BaseIndex& mem,
Register temp) DEFINED_ON(x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp)
@@ -5245,8 +5258,8 @@ class MacroAssembler : public MacroAssemblerSpecific {
void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
const Address& dest);
- void memoryBarrierBefore(const Synchronization& sync);
- void memoryBarrierAfter(const Synchronization& sync);
+ void memoryBarrierBefore(Synchronization sync);
+ void memoryBarrierAfter(Synchronization sync);
void debugAssertIsObject(const ValueOperand& val);
void debugAssertObjHasFixedSlots(Register obj, Register scratch);
@@ -5284,12 +5297,41 @@ class MacroAssembler : public MacroAssemblerSpecific {
Label* label);
void typedArrayElementSize(Register obj, Register output);
+
+ private:
+ // Shift |output| by the element shift of the ResizableTypedArray in |obj|.
+ void resizableTypedArrayElementShiftBy(Register obj, Register output,
+ Register scratch);
+
+ public:
void branchIfClassIsNotTypedArray(Register clasp, Label* notTypedArray);
void branchIfClassIsNotFixedLengthTypedArray(Register clasp,
Label* notTypedArray);
+ void branchIfClassIsNotResizableTypedArray(Register clasp,
+ Label* notTypedArray);
+
+ private:
+ enum class BranchIfDetached { No, Yes };
+
+ void branchIfHasDetachedArrayBuffer(BranchIfDetached branchIf, Register obj,
+ Register temp, Label* label);
+ public:
void branchIfHasDetachedArrayBuffer(Register obj, Register temp,
- Label* label);
+ Label* label) {
+ branchIfHasDetachedArrayBuffer(BranchIfDetached::Yes, obj, temp, label);
+ }
+
+ void branchIfHasAttachedArrayBuffer(Register obj, Register temp,
+ Label* label) {
+ branchIfHasDetachedArrayBuffer(BranchIfDetached::No, obj, temp, label);
+ }
+
+ void branchIfResizableArrayBufferViewOutOfBounds(Register obj, Register temp,
+ Label* label);
+
+ void branchIfResizableArrayBufferViewInBounds(Register obj, Register temp,
+ Label* label);
void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);
void branchNativeIteratorIndices(Condition cond, Register ni, Register temp,
@@ -5560,7 +5602,7 @@ class MacroAssembler : public MacroAssemblerSpecific {
void loadMegamorphicCache(Register dest);
void lookupStringInAtomCacheLastLookups(Register str, Register scratch,
- Label* fail);
+ Register output, Label* fail);
void loadMegamorphicSetPropCache(Register dest);
void loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
@@ -5627,6 +5669,35 @@ class MacroAssembler : public MacroAssemblerSpecific {
void loadArrayBufferViewByteOffsetIntPtr(Register obj, Register output);
void loadArrayBufferViewLengthIntPtr(Register obj, Register output);
+ void loadGrowableSharedArrayBufferByteLengthIntPtr(Synchronization sync,
+ Register obj,
+ Register output);
+
+ private:
+ enum class ResizableArrayBufferView { TypedArray, DataView };
+
+ void loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView view,
+ Synchronization sync,
+ Register obj, Register output,
+ Register scratch);
+
+ public:
+ void loadResizableTypedArrayLengthIntPtr(Synchronization sync, Register obj,
+ Register output, Register scratch) {
+ loadResizableArrayBufferViewLengthIntPtr(
+ ResizableArrayBufferView::TypedArray, sync, obj, output, scratch);
+ }
+
+ void loadResizableDataViewByteLengthIntPtr(Synchronization sync, Register obj,
+ Register output,
+ Register scratch) {
+ loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView::DataView,
+ sync, obj, output, scratch);
+ }
+
+ void loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(
+ Register obj, Register output, Register scratch);
+
private:
void isCallableOrConstructor(bool isCallable, Register obj, Register output,
Label* isProxy);
diff --git a/js/src/jit/PcScriptCache.h b/js/src/jit/PcScriptCache.h
deleted file mode 100644
index c83c479c85..0000000000
--- a/js/src/jit/PcScriptCache.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- * vim: set ts=8 sts=2 et sw=2 tw=80:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_PcScriptCache_h
-#define jit_PcScriptCache_h
-
-#include "mozilla/Array.h"
-#include "js/TypeDecls.h"
-#include "vm/Runtime.h"
-
-// Defines a fixed-size hash table solely for the purpose of caching
-// jit::GetPcScript(). One cache is attached to each JSRuntime; it functions as
-// if cleared on GC.
-
-namespace js {
-namespace jit {
-
-struct PcScriptCacheEntry {
- uint8_t* returnAddress; // Key into the hash table.
- jsbytecode* pc; // Cached PC.
- JSScript* script; // Cached script.
-};
-
-struct PcScriptCache {
- private:
- static const uint32_t Length = 73;
-
- // GC number at the time the cache was filled or created.
- // Storing and checking against this number allows us to not bother
- // clearing this cache on every GC -- only when actually necessary.
- uint64_t gcNumber;
-
- // List of cache entries.
- mozilla::Array<PcScriptCacheEntry, Length> entries;
-
- public:
- explicit PcScriptCache(uint64_t gcNumber) { clear(gcNumber); }
-
- void clear(uint64_t gcNumber) {
- for (uint32_t i = 0; i < Length; i++) {
- entries[i].returnAddress = nullptr;
- }
- this->gcNumber = gcNumber;
- }
-
- // Get a value from the cache. May perform lazy allocation.
- [[nodiscard]] bool get(JSRuntime* rt, uint32_t hash, uint8_t* addr,
- JSScript** scriptRes, jsbytecode** pcRes) {
- // If a GC occurred, lazily clear the cache now.
- if (gcNumber != rt->gc.gcNumber()) {
- clear(rt->gc.gcNumber());
- return false;
- }
-
- if (entries[hash].returnAddress != addr) {
- return false;
- }
-
- *scriptRes = entries[hash].script;
- if (pcRes) {
- *pcRes = entries[hash].pc;
- }
-
- return true;
- }
-
- void add(uint32_t hash, uint8_t* addr, jsbytecode* pc, JSScript* script) {
- MOZ_ASSERT(addr);
- MOZ_ASSERT(pc);
- MOZ_ASSERT(script);
- entries[hash].returnAddress = addr;
- entries[hash].pc = pc;
- entries[hash].script = script;
- }
-
- static uint32_t Hash(uint8_t* addr) {
- uint32_t key = (uint32_t)((uintptr_t)addr);
- return ((key >> 3) * 2654435761u) % Length;
- }
-};
-
-} // namespace jit
-} // namespace js
-
-#endif /* jit_PcScriptCache_h */
diff --git a/js/src/jit/RangeAnalysis.cpp b/js/src/jit/RangeAnalysis.cpp
index 4ed15daabb..bd8380a690 100644
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -1802,6 +1802,25 @@ void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
}
}
+void MResizableTypedArrayByteOffsetMaybeOutOfBounds::computeRange(
+ TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
+void MResizableTypedArrayLength::computeRange(TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
+void MResizableDataViewByteLength::computeRange(TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
void MTypedArrayElementSize::computeRange(TempAllocator& alloc) {
constexpr auto MaxTypedArraySize = sizeof(double);
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
index e70722bffe..220ffe7bb2 100644
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -11,6 +11,7 @@
#include "builtin/Object.h"
#include "builtin/RegExp.h"
#include "builtin/String.h"
+#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/CompileInfo.h"
#include "jit/Ion.h"
@@ -2013,15 +2014,11 @@ bool MAtomicIsLockFree::writeRecoverData(CompactBufferWriter& writer) const {
RAtomicIsLockFree::RAtomicIsLockFree(CompactBufferReader& reader) {}
bool RAtomicIsLockFree::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
+ Value operand = iter.read();
MOZ_ASSERT(operand.isInt32());
- int32_t result;
- if (!js::AtomicIsLockFree(cx, operand, &result)) {
- return false;
- }
-
- iter.storeInstructionResult(Int32Value(result));
+ bool result = AtomicOperations::isLockfreeJS(operand.toInt32());
+ iter.storeInstructionResult(BooleanValue(result));
return true;
}
diff --git a/js/src/jit/Registers.h b/js/src/jit/Registers.h
index 1ae9c1954c..43e94e6bff 100644
--- a/js/src/jit/Registers.h
+++ b/js/src/jit/Registers.h
@@ -205,6 +205,7 @@ struct Register64 {
return high != other.high || low != other.low;
}
Register scratchReg() { return high; }
+ Register secondScratchReg() { return low; }
static Register64 Invalid() {
return Register64(Register::Invalid(), Register::Invalid());
}
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
index 29777d08c7..ed3f63c88c 100644
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -2557,13 +2557,14 @@ BigInt* BigIntAsUintN(JSContext* cx, HandleBigInt x, int32_t bits) {
}
template <typename T>
-static int32_t AtomicsCompareExchange(FixedLengthTypedArrayObject* typedArray,
+static int32_t AtomicsCompareExchange(TypedArrayObject* typedArray,
size_t index, int32_t expected,
int32_t replacement) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::compareExchangeSeqCst(addr + index, T(expected),
@@ -2590,12 +2591,13 @@ AtomicsCompareExchangeFn AtomicsCompareExchange(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsExchange(FixedLengthTypedArrayObject* typedArray,
- size_t index, int32_t value) {
+static int32_t AtomicsExchange(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::exchangeSeqCst(addr + index, T(value));
@@ -2621,12 +2623,13 @@ AtomicsReadWriteModifyFn AtomicsExchange(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsAdd(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsAdd(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchAddSeqCst(addr + index, T(value));
@@ -2652,12 +2655,13 @@ AtomicsReadWriteModifyFn AtomicsAdd(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsSub(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsSub(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchSubSeqCst(addr + index, T(value));
@@ -2683,12 +2687,13 @@ AtomicsReadWriteModifyFn AtomicsSub(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsAnd(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsAnd(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchAndSeqCst(addr + index, T(value));
@@ -2714,12 +2719,13 @@ AtomicsReadWriteModifyFn AtomicsAnd(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsOr(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsOr(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchOrSeqCst(addr + index, T(value));
@@ -2745,12 +2751,13 @@ AtomicsReadWriteModifyFn AtomicsOr(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsXor(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsXor(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchXorSeqCst(addr + index, T(value));
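All of the AtomicsXxx helpers above share the same relaxed precondition. A sketch of what the new asserts rely on, assuming TypedArrayObject::length() returns a mozilla::Maybe<size_t> that is Nothing for detached or out-of-bounds views:

    size_t len = typedArray->length().valueOr(0);  // Nothing collapses to 0
    MOZ_ASSERT(!typedArray->hasDetachedBuffer());
    MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
    MOZ_ASSERT(index < len);  // any index fails once the view is detached or OOB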
@@ -2776,12 +2783,12 @@ AtomicsReadWriteModifyFn AtomicsXor(Scalar::Type elementType) {
}
template <typename AtomicOp, typename... Args>
-static BigInt* AtomicAccess64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray,
+static BigInt* AtomicAccess64(JSContext* cx, TypedArrayObject* typedArray,
size_t index, AtomicOp op, Args... args) {
MOZ_ASSERT(Scalar::isBigIntType(typedArray->type()));
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
if (typedArray->type() == Scalar::BigInt64) {
SharedMem<int64_t*> addr = typedArray->dataPointerEither().cast<int64_t*>();
@@ -2795,11 +2802,12 @@ static BigInt* AtomicAccess64(JSContext* cx,
}
template <typename AtomicOp, typename... Args>
-static auto AtomicAccess64(FixedLengthTypedArrayObject* typedArray,
- size_t index, AtomicOp op, Args... args) {
+static auto AtomicAccess64(TypedArrayObject* typedArray, size_t index,
+ AtomicOp op, Args... args) {
MOZ_ASSERT(Scalar::isBigIntType(typedArray->type()));
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
if (typedArray->type() == Scalar::BigInt64) {
SharedMem<int64_t*> addr = typedArray->dataPointerEither().cast<int64_t*>();
@@ -2810,14 +2818,14 @@ static auto AtomicAccess64(FixedLengthTypedArrayObject* typedArray,
return op(addr + index, BigInt::toUint64(args)...);
}
-BigInt* AtomicsLoad64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsLoad64(JSContext* cx, TypedArrayObject* typedArray,
size_t index) {
return AtomicAccess64(cx, typedArray, index, [](auto addr) {
return jit::AtomicOperations::loadSeqCst(addr);
});
}
-void AtomicsStore64(FixedLengthTypedArrayObject* typedArray, size_t index,
+void AtomicsStore64(TypedArrayObject* typedArray, size_t index,
const BigInt* value) {
AutoUnsafeCallWithABI unsafe;
@@ -2829,8 +2837,7 @@ void AtomicsStore64(FixedLengthTypedArrayObject* typedArray, size_t index,
value);
}
-BigInt* AtomicsCompareExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsCompareExchange64(JSContext* cx, TypedArrayObject* typedArray,
size_t index, const BigInt* expected,
const BigInt* replacement) {
return AtomicAccess64(
@@ -2842,9 +2849,8 @@ BigInt* AtomicsCompareExchange64(JSContext* cx,
expected, replacement);
}
-BigInt* AtomicsExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray, size_t index,
- const BigInt* value) {
+BigInt* AtomicsExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2853,8 +2859,8 @@ BigInt* AtomicsExchange64(JSContext* cx,
value);
}
-BigInt* AtomicsAdd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsAdd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2863,8 +2869,8 @@ BigInt* AtomicsAdd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsAnd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsAnd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2873,8 +2879,8 @@ BigInt* AtomicsAnd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsOr64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsOr64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2883,8 +2889,8 @@ BigInt* AtomicsOr64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsSub64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsSub64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2893,8 +2899,8 @@ BigInt* AtomicsSub64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsXor64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsXor64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
index cfce36caaa..a68dd8279f 100644
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -27,7 +27,6 @@ namespace js {
class AbstractGeneratorObject;
class ArrayObject;
-class FixedLengthTypedArrayObject;
class GlobalObject;
class InterpreterFrame;
class LexicalScope;
@@ -640,11 +639,11 @@ bool StringBigIntCompare(JSContext* cx, HandleString x, HandleBigInt y,
BigInt* BigIntAsIntN(JSContext* cx, HandleBigInt x, int32_t bits);
BigInt* BigIntAsUintN(JSContext* cx, HandleBigInt x, int32_t bits);
-using AtomicsCompareExchangeFn = int32_t (*)(FixedLengthTypedArrayObject*,
- size_t, int32_t, int32_t);
+using AtomicsCompareExchangeFn = int32_t (*)(TypedArrayObject*, size_t, int32_t,
+ int32_t);
-using AtomicsReadWriteModifyFn = int32_t (*)(FixedLengthTypedArrayObject*,
- size_t, int32_t);
+using AtomicsReadWriteModifyFn = int32_t (*)(TypedArrayObject*, size_t,
+ int32_t);
AtomicsCompareExchangeFn AtomicsCompareExchange(Scalar::Type elementType);
AtomicsReadWriteModifyFn AtomicsExchange(Scalar::Type elementType);
@@ -654,31 +653,29 @@ AtomicsReadWriteModifyFn AtomicsAnd(Scalar::Type elementType);
AtomicsReadWriteModifyFn AtomicsOr(Scalar::Type elementType);
AtomicsReadWriteModifyFn AtomicsXor(Scalar::Type elementType);
-BigInt* AtomicsLoad64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsLoad64(JSContext* cx, TypedArrayObject* typedArray,
size_t index);
-void AtomicsStore64(FixedLengthTypedArrayObject* typedArray, size_t index,
+void AtomicsStore64(TypedArrayObject* typedArray, size_t index,
const BigInt* value);
-BigInt* AtomicsCompareExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsCompareExchange64(JSContext* cx, TypedArrayObject* typedArray,
size_t index, const BigInt* expected,
const BigInt* replacement);
-BigInt* AtomicsExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray, size_t index,
- const BigInt* value);
-
-BigInt* AtomicsAdd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsAnd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsOr64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsSub64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsXor64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
+BigInt* AtomicsExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* value);
+
+BigInt* AtomicsAdd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsAnd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsOr64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsSub64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsXor64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
JSAtom* AtomizeStringNoGC(JSContext* cx, JSString* str);
diff --git a/js/src/jit/WarpBuilderShared.cpp b/js/src/jit/WarpBuilderShared.cpp
index 89e93d6150..e04984690c 100644
--- a/js/src/jit/WarpBuilderShared.cpp
+++ b/js/src/jit/WarpBuilderShared.cpp
@@ -22,9 +22,12 @@ WarpBuilderShared::WarpBuilderShared(WarpSnapshot& snapshot,
bool WarpBuilderShared::resumeAfter(MInstruction* ins, BytecodeLocation loc) {
// resumeAfter should only be used with effectful instructions. The only
- // exception is MInt64ToBigInt, it's used to convert the result of a call into
- // Wasm code so we attach the resume point to that instead of to the call.
- MOZ_ASSERT(ins->isEffectful() || ins->isInt64ToBigInt());
+ // exceptions are:
+ // 1. MInt64ToBigInt, which is used to convert the result of a call into Wasm
+ // code so we attach the resume point to that instead of to the call.
+ // 2. MPostIntPtrConversion which is used after conversion from IntPtr.
+ MOZ_ASSERT(ins->isEffectful() || ins->isInt64ToBigInt() ||
+ ins->isPostIntPtrConversion());
MOZ_ASSERT(!ins->isMovable());
MResumePoint* resumePoint = MResumePoint::New(
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
index 7654232ecd..9a99e0f5c3 100644
--- a/js/src/jit/WarpCacheIRTranspiler.cpp
+++ b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -11,8 +11,6 @@
#include "jsmath.h"
-#include "builtin/DataViewObject.h"
-#include "builtin/MapObject.h"
#include "jit/AtomicOp.h"
#include "jit/CacheIR.h"
#include "jit/CacheIRCompiler.h"
@@ -26,7 +24,6 @@
#include "jit/WarpBuilderShared.h"
#include "jit/WarpSnapshot.h"
#include "js/ScalarType.h" // js::Scalar::Type
-#include "vm/ArgumentsObject.h"
#include "vm/BytecodeLocation.h"
#include "wasm/WasmCode.h"
@@ -52,7 +49,8 @@ class MOZ_RAII WarpCacheIRTranspiler : public WarpBuilderShared {
// Array mapping call arguments to OperandId.
using ArgumentKindArray =
- mozilla::EnumeratedArray<ArgumentKind, ArgumentKind::NumKinds, OperandId>;
+ mozilla::EnumeratedArray<ArgumentKind, OperandId,
+ size_t(ArgumentKind::NumKinds)>;
ArgumentKindArray argumentOperandIds_;
void setArgumentId(ArgumentKind kind, OperandId id) {
@@ -255,14 +253,20 @@ class MOZ_RAII WarpCacheIRTranspiler : public WarpBuilderShared {
ObjOperandId objId, uint32_t offsetOffset,
ValOperandId rhsId, uint32_t newShapeOffset);
- void addDataViewData(MDefinition* obj, Scalar::Type type,
- MDefinition** offset, MInstruction** elements);
+ MInstruction* emitTypedArrayLength(ArrayBufferViewKind viewKind,
+ MDefinition* obj);
- [[nodiscard]] bool emitAtomicsBinaryOp(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect, AtomicOp op);
+ MInstruction* emitDataViewLength(ArrayBufferViewKind viewKind,
+ MDefinition* obj);
+
+ void addDataViewData(ArrayBufferViewKind viewKind, MDefinition* obj,
+ Scalar::Type type, MDefinition** offset,
+ MInstruction** elements);
+
+ [[nodiscard]] bool emitAtomicsBinaryOp(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind,
+ AtomicOp op);
[[nodiscard]] bool emitLoadArgumentSlot(ValOperandId resultId,
uint32_t slotIndex);
@@ -347,9 +351,14 @@ bool WarpCacheIRTranspiler::transpile(
// Effectful instructions should have a resume point. MIonToWasmCall is an
// exception: we can attach the resume point to the MInt64ToBigInt instruction
- // instead.
+  // instead. Other exceptions are MResizableTypedArrayLength,
+  // MResizableDataViewByteLength, and MGrowableSharedArrayBufferByteLength,
+  // which attach the resume point to MPostIntPtrConversion.
MOZ_ASSERT_IF(effectful_,
- effectful_->resumePoint() || effectful_->isIonToWasmCall());
+ effectful_->resumePoint() || effectful_->isIonToWasmCall() ||
+ effectful_->isResizableTypedArrayLength() ||
+ effectful_->isResizableDataViewByteLength() ||
+ effectful_->isGrowableSharedArrayBufferByteLength());
return true;
}
@@ -385,31 +394,44 @@ bool WarpCacheIRTranspiler::emitGuardClass(ObjOperandId objId,
return true;
}
+bool WarpCacheIRTranspiler::emitGuardEitherClass(ObjOperandId objId,
+ GuardClassKind kind1,
+ GuardClassKind kind2) {
+ MDefinition* def = getOperand(objId);
+
+ // We don't yet need this case, so it's unsupported for now.
+ MOZ_ASSERT(kind1 != GuardClassKind::JSFunction &&
+ kind2 != GuardClassKind::JSFunction);
+
+ const JSClass* classp1 = classForGuardClassKind(kind1);
+ const JSClass* classp2 = classForGuardClassKind(kind2);
+ auto* ins = MGuardToEitherClass::New(alloc(), def, classp1, classp2);
+
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
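A hypothetical transpilation that would use the new either-class guard, assuming the CacheIR generator pairs the fixed-length and resizable buffer kinds this way:

    // Guard that |objId| is an ArrayBuffer of either flavour.
    if (!emitGuardEitherClass(objId, GuardClassKind::FixedLengthArrayBuffer,
                              GuardClassKind::ResizableArrayBuffer)) {
      return false;
    }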
const JSClass* WarpCacheIRTranspiler::classForGuardClassKind(
GuardClassKind kind) {
switch (kind) {
case GuardClassKind::Array:
- return &ArrayObject::class_;
case GuardClassKind::PlainObject:
- return &PlainObject::class_;
case GuardClassKind::FixedLengthArrayBuffer:
- return &FixedLengthArrayBufferObject::class_;
+ case GuardClassKind::ResizableArrayBuffer:
case GuardClassKind::FixedLengthSharedArrayBuffer:
- return &FixedLengthSharedArrayBufferObject::class_;
+ case GuardClassKind::GrowableSharedArrayBuffer:
case GuardClassKind::FixedLengthDataView:
- return &FixedLengthDataViewObject::class_;
+ case GuardClassKind::ResizableDataView:
case GuardClassKind::MappedArguments:
- return &MappedArgumentsObject::class_;
case GuardClassKind::UnmappedArguments:
- return &UnmappedArgumentsObject::class_;
- case GuardClassKind::WindowProxy:
- return mirGen().runtime->maybeWindowProxyClass();
case GuardClassKind::Set:
- return &SetObject::class_;
case GuardClassKind::Map:
- return &MapObject::class_;
case GuardClassKind::BoundFunction:
- return &BoundFunctionObject::class_;
+ return ClassFor(kind);
+ case GuardClassKind::WindowProxy:
+ return mirGen().runtime->maybeWindowProxyClass();
case GuardClassKind::JSFunction:
break;
}
@@ -830,6 +852,16 @@ bool WarpCacheIRTranspiler::emitGuardIsFixedLengthTypedArray(
return true;
}
+bool WarpCacheIRTranspiler::emitGuardIsResizableTypedArray(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsResizableTypedArray::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
bool WarpCacheIRTranspiler::emitGuardHasProxyHandler(ObjOperandId objId,
uint32_t handlerOffset) {
MDefinition* obj = getOperand(objId);
@@ -2121,13 +2153,35 @@ bool WarpCacheIRTranspiler::emitCallObjectHasSparseElementResult(
return true;
}
+MInstruction* WarpCacheIRTranspiler::emitTypedArrayLength(
+ ArrayBufferViewKind viewKind, MDefinition* obj) {
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ return length;
+ }
+
+ // Bounds check doesn't require a memory barrier. See IsValidIntegerIndex
+ // abstract operation which reads the underlying buffer byte length using
+ // "unordered" memory order.
+ auto barrier = MemoryBarrierRequirement::NotRequired;
+
+ // Movable and removable because no memory barrier is needed.
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ length->setMovable();
+ length->setNotGuard();
+ add(length);
+
+ return length;
+}
+
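emitTypedArrayLength chooses the length node per view kind. A condensed decision sketch, with the barrier choice taken from the comments in the function and in the explicit-length accessors further down:

    // FixedLength view:
    //   MArrayBufferViewLength (not effectful).
    // Resizable view, implicit length for a bounds check:
    //   MResizableTypedArrayLength with MemoryBarrierRequirement::NotRequired,
    //   marked movable and setNotGuard(), so GVN/LICM may hoist or remove it.
    // Resizable view, explicit .length / .byteLength read (see the
    //   emitResizableTypedArray*Result methods below):
    //   the same node with MemoryBarrierRequirement::Required, added via
    //   addEffectful() and resumed after MPostIntPtrConversion.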
bool WarpCacheIRTranspiler::emitLoadTypedArrayElementExistsResult(
- ObjOperandId objId, IntPtrOperandId indexId) {
+ ObjOperandId objId, IntPtrOperandId indexId, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
// Unsigned comparison to catch negative indices.
auto* ins = MCompare::New(alloc(), index, length, JSOp::Lt,
@@ -2165,27 +2219,29 @@ static MIRType MIRTypeForArrayBufferViewRead(Scalar::Type arrayType,
bool WarpCacheIRTranspiler::emitLoadTypedArrayElementResult(
ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
- bool handleOOB, bool forceDoubleForUint32) {
+ bool handleOOB, bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
+ auto* length = emitTypedArrayLength(viewKind, obj);
+
+ if (!handleOOB) {
+ // MLoadTypedArrayElementHole does the bounds checking.
+ index = addBoundsCheck(index, length);
+ }
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
if (handleOOB) {
auto* load = MLoadTypedArrayElementHole::New(
- alloc(), obj, index, elementType, forceDoubleForUint32);
+ alloc(), elements, index, length, elementType, forceDoubleForUint32);
add(load);
pushResult(load);
return true;
}
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
-
- index = addBoundsCheck(index, length);
-
- auto* elements = MArrayBufferViewElements::New(alloc(), obj);
- add(elements);
-
auto* load = MLoadUnboxedScalar::New(alloc(), elements, index, elementType);
load->setResultType(
MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32));
@@ -2717,17 +2773,14 @@ bool WarpCacheIRTranspiler::emitStoreDenseElementHole(ObjOperandId objId,
return resumeAfter(store);
}
-bool WarpCacheIRTranspiler::emitStoreTypedArrayElement(ObjOperandId objId,
- Scalar::Type elementType,
- IntPtrOperandId indexId,
- uint32_t rhsId,
- bool handleOOB) {
+bool WarpCacheIRTranspiler::emitStoreTypedArrayElement(
+ ObjOperandId objId, Scalar::Type elementType, IntPtrOperandId indexId,
+ uint32_t rhsId, bool handleOOB, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* rhs = getOperand(ValOperandId(rhsId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
if (!handleOOB) {
// MStoreTypedArrayElementHole does the bounds checking.
@@ -2749,11 +2802,34 @@ bool WarpCacheIRTranspiler::emitStoreTypedArrayElement(ObjOperandId objId,
return resumeAfter(store);
}
-void WarpCacheIRTranspiler::addDataViewData(MDefinition* obj, Scalar::Type type,
+MInstruction* WarpCacheIRTranspiler::emitDataViewLength(
+ ArrayBufferViewKind viewKind, MDefinition* obj) {
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ return length;
+ }
+
+ // Bounds check doesn't require a memory barrier. See GetViewValue and
+ // SetViewValue abstract operations which read the underlying buffer byte
+ // length using "unordered" memory order.
+ auto barrier = MemoryBarrierRequirement::NotRequired;
+
+ // Movable and removable because no memory barrier is needed.
+ auto* length = MResizableDataViewByteLength::New(alloc(), obj, barrier);
+ length->setMovable();
+ length->setNotGuard();
+ add(length);
+
+ return length;
+}
+
+void WarpCacheIRTranspiler::addDataViewData(ArrayBufferViewKind viewKind,
+ MDefinition* obj, Scalar::Type type,
MDefinition** offset,
MInstruction** elements) {
- MInstruction* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitDataViewLength(viewKind, obj);
// Adjust the length to account for accesses near the end of the dataview.
if (size_t byteSize = Scalar::byteSize(type); byteSize > 1) {
@@ -2773,14 +2849,14 @@ void WarpCacheIRTranspiler::addDataViewData(MDefinition* obj, Scalar::Type type,
bool WarpCacheIRTranspiler::emitLoadDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId,
BooleanOperandId littleEndianId, Scalar::Type elementType,
- bool forceDoubleForUint32) {
+ bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* offset = getOperand(offsetId);
MDefinition* littleEndian = getOperand(littleEndianId);
// Add bounds check and get the DataViewObject's elements.
MInstruction* elements;
- addDataViewData(obj, elementType, &offset, &elements);
+ addDataViewData(viewKind, obj, elementType, &offset, &elements);
// Load the element.
MInstruction* load;
@@ -2802,7 +2878,8 @@ bool WarpCacheIRTranspiler::emitLoadDataViewValueResult(
bool WarpCacheIRTranspiler::emitStoreDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
- BooleanOperandId littleEndianId, Scalar::Type elementType) {
+ BooleanOperandId littleEndianId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* offset = getOperand(offsetId);
MDefinition* value = getOperand(ValOperandId(valueId));
@@ -2810,7 +2887,7 @@ bool WarpCacheIRTranspiler::emitStoreDataViewValueResult(
// Add bounds check and get the DataViewObject's elements.
MInstruction* elements;
- addDataViewData(obj, elementType, &offset, &elements);
+ addDataViewData(viewKind, obj, elementType, &offset, &elements);
// Store the element.
MInstruction* store;
@@ -4067,6 +4144,78 @@ bool WarpCacheIRTranspiler::emitArrayBufferViewByteOffsetDoubleResult(
return true;
}
+bool WarpCacheIRTranspiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* byteOffset =
+ MResizableTypedArrayByteOffsetMaybeOutOfBounds::New(alloc(), obj);
+ add(byteOffset);
+
+ auto* byteOffsetInt32 = MNonNegativeIntPtrToInt32::New(alloc(), byteOffset);
+ add(byteOffsetInt32);
+
+ pushResult(byteOffsetInt32);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* byteOffset =
+ MResizableTypedArrayByteOffsetMaybeOutOfBounds::New(alloc(), obj);
+ add(byteOffset);
+
+ auto* byteOffsetDouble = MIntPtrToDouble::New(alloc(), byteOffset);
+ add(byteOffsetDouble);
+
+ pushResult(byteOffsetDouble);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitResizableTypedArrayLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthInt32);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitResizableTypedArrayLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthDouble);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
bool WarpCacheIRTranspiler::emitTypedArrayByteLengthInt32Result(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4112,6 +4261,63 @@ bool WarpCacheIRTranspiler::emitTypedArrayByteLengthDoubleResult(
return true;
}
+bool WarpCacheIRTranspiler::emitResizableTypedArrayByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* size = MTypedArrayElementSize::New(alloc(), obj);
+ add(size);
+
+ auto* mul = MMul::New(alloc(), lengthInt32, size, MIRType::Int32);
+ mul->setCanBeNegativeZero(false);
+ add(mul);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), mul);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitResizableTypedArrayByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* size = MTypedArrayElementSize::New(alloc(), obj);
+ add(size);
+
+ auto* sizeDouble = MToDouble::New(alloc(), size);
+ add(sizeDouble);
+
+ auto* mul = MMul::New(alloc(), lengthDouble, sizeDouble, MIRType::Double);
+ mul->setCanBeNegativeZero(false);
+ add(mul);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), mul);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
bool WarpCacheIRTranspiler::emitTypedArrayElementSizeResult(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4123,6 +4329,80 @@ bool WarpCacheIRTranspiler::emitTypedArrayElementSizeResult(
return true;
}
+bool WarpCacheIRTranspiler::emitResizableDataViewByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableDataViewByteLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthInt32);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitResizableDataViewByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableDataViewByteLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthDouble);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitGrowableSharedArrayBufferByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MGrowableSharedArrayBufferByteLength::New(alloc(), obj);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthInt32);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitGrowableSharedArrayBufferByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MGrowableSharedArrayBufferByteLength::New(alloc(), obj);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthDouble);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
bool WarpCacheIRTranspiler::emitGuardHasAttachedArrayBuffer(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4134,6 +4414,29 @@ bool WarpCacheIRTranspiler::emitGuardHasAttachedArrayBuffer(
return true;
}
+bool WarpCacheIRTranspiler::emitGuardResizableArrayBufferViewInBounds(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardResizableArrayBufferViewInBounds::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardResizableArrayBufferViewInBoundsOrDetached(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins =
+ MGuardResizableArrayBufferViewInBoundsOrDetached::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
bool WarpCacheIRTranspiler::emitIsTypedArrayConstructorResult(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4318,14 +4621,14 @@ bool WarpCacheIRTranspiler::emitNewTypedArrayFromArrayResult(
bool WarpCacheIRTranspiler::emitAtomicsCompareExchangeResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
- uint32_t replacementId, Scalar::Type elementType) {
+ uint32_t replacementId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* expected = getOperand(ValOperandId(expectedId));
MDefinition* replacement = getOperand(ValOperandId(replacementId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4347,13 +4650,12 @@ bool WarpCacheIRTranspiler::emitAtomicsCompareExchangeResult(
bool WarpCacheIRTranspiler::emitAtomicsExchangeResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
- Scalar::Type elementType) {
+ Scalar::Type elementType, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* value = getOperand(ValOperandId(valueId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4373,17 +4675,15 @@ bool WarpCacheIRTranspiler::emitAtomicsExchangeResult(
return resumeAfter(exchange);
}
-bool WarpCacheIRTranspiler::emitAtomicsBinaryOp(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect, AtomicOp op) {
+bool WarpCacheIRTranspiler::emitAtomicsBinaryOp(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind,
+ AtomicOp op) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* value = getOperand(ValOperandId(valueId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4409,59 +4709,48 @@ bool WarpCacheIRTranspiler::emitAtomicsBinaryOp(ObjOperandId objId,
return resumeAfter(binop);
}
-bool WarpCacheIRTranspiler::emitAtomicsAddResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsAddResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchAddOp);
+ viewKind, AtomicOp::Add);
}
-bool WarpCacheIRTranspiler::emitAtomicsSubResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsSubResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchSubOp);
+ viewKind, AtomicOp::Sub);
}
-bool WarpCacheIRTranspiler::emitAtomicsAndResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsAndResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchAndOp);
+ viewKind, AtomicOp::And);
}
-bool WarpCacheIRTranspiler::emitAtomicsOrResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsOrResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchOrOp);
+ viewKind, AtomicOp::Or);
}
-bool WarpCacheIRTranspiler::emitAtomicsXorResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsXorResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchXorOp);
+ viewKind, AtomicOp::Xor);
}
-bool WarpCacheIRTranspiler::emitAtomicsLoadResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- Scalar::Type elementType) {
+bool WarpCacheIRTranspiler::emitAtomicsLoadResult(
+ ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4473,7 +4762,7 @@ bool WarpCacheIRTranspiler::emitAtomicsLoadResult(ObjOperandId objId,
MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32);
auto* load = MLoadUnboxedScalar::New(alloc(), elements, index, elementType,
- DoesRequireMemoryBarrier);
+ MemoryBarrierRequirement::Required);
load->setResultType(knownType);
addEffectful(load);
@@ -4481,24 +4770,23 @@ bool WarpCacheIRTranspiler::emitAtomicsLoadResult(ObjOperandId objId,
return resumeAfter(load);
}
-bool WarpCacheIRTranspiler::emitAtomicsStoreResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType) {
+bool WarpCacheIRTranspiler::emitAtomicsStoreResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* value = getOperand(ValOperandId(valueId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
auto* elements = MArrayBufferViewElements::New(alloc(), obj);
add(elements);
- auto* store = MStoreUnboxedScalar::New(alloc(), elements, index, value,
- elementType, DoesRequireMemoryBarrier);
+ auto* store =
+ MStoreUnboxedScalar::New(alloc(), elements, index, value, elementType,
+ MemoryBarrierRequirement::Required);
addEffectful(store);
pushResult(value);
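
For context on the barrier choices in the transpiler hunks above: MemoryBarrierRequirement::NotRequired matches the spec's "unordered" byte-length reads used only for bounds checks, while the explicit length/byteLength getters are emitted as effectful, sequentially consistent loads. A minimal sketch of that distinction with standard C++ atomics (std::atomic and the SharedLengthCell name stand in for the engine's internal length slot and are illustrative only, not part of the patch):

#include <atomic>
#include <cstddef>

// Illustrative stand-in for a resizable buffer whose length may be updated
// concurrently (e.g. a growable SharedArrayBuffer).
struct SharedLengthCell {
  std::atomic<size_t> length{0};

  // Bounds-check style read: ordering does not matter, so a relaxed
  // ("unordered") load suffices and the value may be hoisted/reused,
  // analogous to the movable, non-effectful MIR node above.
  size_t lengthUnordered() const {
    return length.load(std::memory_order_relaxed);
  }

  // Explicit |length| / |byteLength| style read: modelled as a
  // sequentially consistent load so it observes concurrent growth,
  // analogous to the effectful MIR node with a required barrier.
  size_t lengthSeqCst() const {
    return length.load(std::memory_order_seq_cst);
  }
};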
diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
index 0c35309c7e..98675164a9 100644
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2404,10 +2404,6 @@ void CodeGenerator::visitNegF(LNegF* ins) {
masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
auto input = ToFloatRegister(lir->input());
auto output = ToRegister(lir->output());
diff --git a/js/src/jit/arm/MacroAssembler-arm.cpp b/js/src/jit/arm/MacroAssembler-arm.cpp
index 50d5d6645c..be8348a1fc 100644
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -5008,7 +5008,7 @@ static Register ComputePointerForAtomic(MacroAssembler& masm,
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register output) {
bool signExtend = Scalar::isSignedIntType(type);
@@ -5087,15 +5087,13 @@ static void CompareExchange(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& address, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, sync, address, oldval, newval, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& address, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, sync, address, oldval, newval, output);
@@ -5118,7 +5116,7 @@ void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register output) {
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
@@ -5175,15 +5173,13 @@ static void AtomicExchange(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& address, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, sync, address, value, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& address, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, sync, address, value, output);
@@ -5225,8 +5221,8 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const Register& value, const T& mem,
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const Register& value, const T& mem,
Register flagTemp, Register output) {
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
@@ -5274,19 +5270,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add(scratch, output, O2Reg(value));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub(scratch, output, O2Reg(value));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch, output, O2Reg(value));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_orr(scratch, output, O2Reg(value));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_eor(scratch, output, O2Reg(value));
break;
default:
@@ -5312,17 +5308,17 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, value, mem, temp, output);
}
@@ -5357,8 +5353,8 @@ void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const Register& value, const T& mem,
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const Register& value, const T& mem,
Register flagTemp) {
unsigned nbytes = Scalar::byteSize(type);
@@ -5396,19 +5392,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_orr(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_eor(scratch, scratch, O2Reg(value));
break;
default:
@@ -5451,7 +5447,7 @@ void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicLoad64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 output) {
MOZ_ASSERT((output.low.code() & 1) == 0);
MOZ_ASSERT(output.low.code() + 1 == output.high.code());
@@ -5495,7 +5491,7 @@ void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != replace && replace != output && output != expect);
@@ -5556,13 +5552,13 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
@@ -5571,7 +5567,7 @@ void MacroAssembler::compareExchange64(const Synchronization& sync,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(output != value);
@@ -5624,13 +5620,12 @@ void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
WasmAtomicExchange64(*this, access, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
@@ -5639,9 +5634,8 @@ void MacroAssembler::atomicExchange64(const Synchronization& sync,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(temp.low != InvalidReg && temp.high != InvalidReg);
MOZ_ASSERT(output != value);
MOZ_ASSERT(temp != value);
@@ -5671,23 +5665,23 @@ static void AtomicFetchOp64(MacroAssembler& masm,
FaultingCodeOffset(load.getOffset()));
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add(temp.low, output.low, O2Reg(value.low), SetCC);
masm.as_adc(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub(temp.low, output.low, O2Reg(value.low), SetCC);
masm.as_sbc(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.low, output.low, O2Reg(value.low));
masm.as_and(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_orr(temp.low, output.low, O2Reg(value.low));
masm.as_orr(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_eor(temp.low, output.low, O2Reg(value.low));
masm.as_eor(temp.high, output.high, O2Reg(value.high));
break;
@@ -5725,25 +5719,25 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
WasmAtomicFetchOp64(*this, access, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
@@ -5754,7 +5748,7 @@ void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval, Register temp,
AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -5766,15 +5760,14 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register temp,
AnyRegister output) {
@@ -5783,9 +5776,8 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register temp,
- AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
@@ -5795,14 +5787,14 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register temp,
+ AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
@@ -5810,9 +5802,9 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -5822,7 +5814,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -5830,7 +5822,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -5838,14 +5830,14 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp) {
AtomicEffectOp(*this, nullptr, arrayType, sync, op, value, mem, temp);
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp) {
AtomicEffectOp(*this, nullptr, arrayType, sync, op, value, mem, temp);
@@ -5854,25 +5846,23 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
// ========================================================================
// Primitive atomic operations.
-void MacroAssembler::atomicLoad64(const Synchronization& sync,
- const Address& mem, Register64 output) {
+void MacroAssembler::atomicLoad64(Synchronization sync, const Address& mem,
+ Register64 output) {
AtomicLoad64(*this, nullptr, sync, mem, output);
}
-void MacroAssembler::atomicLoad64(const Synchronization& sync,
- const BaseIndex& mem, Register64 output) {
+void MacroAssembler::atomicLoad64(Synchronization sync, const BaseIndex& mem,
+ Register64 output) {
AtomicLoad64(*this, nullptr, sync, mem, output);
}
-void MacroAssembler::atomicStore64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 temp) {
+void MacroAssembler::atomicStore64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, sync, mem, value, temp);
}
-void MacroAssembler::atomicStore64(const Synchronization& sync,
- const BaseIndex& mem, Register64 value,
- Register64 temp) {
+void MacroAssembler::atomicStore64(Synchronization sync, const BaseIndex& mem,
+ Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, sync, mem, value, temp);
}
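
The ARM helpers above now take AtomicOp as a scoped enum (AtomicOp::Add and friends) and Synchronization by value, and they lower the JS fetch-ops to LDREX/STREX retry loops. A rough portable analogue of that loop, assuming a plain std::atomic cell (fetchOp and its signature are illustrative, not the engine's API):

#include <atomic>
#include <cstdint>

// Scoped equivalent of the AtomicOp values used in the switches above.
enum class AtomicOp { Add, Sub, And, Or, Xor };

// CAS-loop fetch-op: load, combine, try to publish, retry on contention,
// and return the previously observed value, mirroring the structure of the
// load-exclusive/store-exclusive loops emitted by the macro-assembler.
uint32_t fetchOp(std::atomic<uint32_t>& mem, AtomicOp op, uint32_t value) {
  uint32_t old = mem.load(std::memory_order_relaxed);
  uint32_t desired = 0;
  do {
    switch (op) {
      case AtomicOp::Add: desired = old + value; break;
      case AtomicOp::Sub: desired = old - value; break;
      case AtomicOp::And: desired = old & value; break;
      case AtomicOp::Or:  desired = old | value; break;
      case AtomicOp::Xor: desired = old ^ value; break;
    }
    // compare_exchange_weak reloads |old| on failure, so the loop retries
    // with the freshly observed value.
  } while (!mem.compare_exchange_weak(old, desired,
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed));
  return old;  // fetch-ops yield the value seen before the update
}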
diff --git a/js/src/jit/arm64/CodeGenerator-arm64.cpp b/js/src/jit/arm64/CodeGenerator-arm64.cpp
index ff3ea96a7d..a232135419 100644
--- a/js/src/jit/arm64/CodeGenerator-arm64.cpp
+++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp
@@ -2563,10 +2563,6 @@ void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
ToRegister(lir->memoryBase()), ToRegister(lir->ptr()));
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
MWasmAddOffset* mir = lir->mir();
Register base = ToRegister(lir->base());
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp
index 682f69df59..e3ec2494ff 100644
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -2324,8 +2324,8 @@ template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
Scalar::Type type, Width targetWidth,
- const Synchronization& sync, const T& mem,
- Register oldval, Register newval, Register output) {
+ Synchronization sync, const T& mem, Register oldval,
+ Register newval, Register output) {
MOZ_ASSERT(oldval != output && newval != output);
vixl::UseScratchRegisterScope temps(&masm);
@@ -2395,8 +2395,8 @@ template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
Scalar::Type type, Width targetWidth,
- const Synchronization& sync, const T& mem,
- Register value, Register output) {
+ Synchronization sync, const T& mem, Register value,
+ Register output) {
MOZ_ASSERT(value != output);
vixl::UseScratchRegisterScope temps(&masm);
@@ -2458,9 +2458,8 @@ template <bool wantResult, typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
Scalar::Type type, Width targetWidth,
- const Synchronization& sync, AtomicOp op,
- const T& mem, Register value, Register temp,
- Register output) {
+ Synchronization sync, AtomicOp op, const T& mem,
+ Register value, Register temp, Register output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
MOZ_ASSERT_IF(wantResult, output != temp);
@@ -2514,25 +2513,25 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
FETCH_OP_CASE(add, value);
break;
- case AtomicFetchSubOp: {
+ case AtomicOp::Sub: {
Register scratch = temps.AcquireX().asUnsized();
masm.Neg(X(scratch), X(value));
FETCH_OP_CASE(add, scratch);
break;
}
- case AtomicFetchAndOp: {
+ case AtomicOp::And: {
Register scratch = temps.AcquireX().asUnsized();
masm.Eor(X(scratch), X(value), Operand(~0));
FETCH_OP_CASE(clr, scratch);
break;
}
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
FETCH_OP_CASE(set, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
FETCH_OP_CASE(eor, value);
break;
}
@@ -2558,19 +2557,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.bind(&again);
LoadExclusive(masm, access, type, targetWidth, ptr, output);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.Add(X(temp), X(output), X(value));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.Sub(X(temp), X(output), X(value));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.And(X(temp), X(output), X(value));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.Orr(X(temp), X(output), X(value));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.Eor(X(temp), X(output), X(value));
break;
}
@@ -2583,72 +2582,69 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, Width::_32, sync, mem, oldval, newval,
output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, Width::_32, sync, mem, oldval, newval,
output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
expect.reg, replace.reg, output.reg);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
expect.reg, replace.reg, output.reg);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
value.reg, output.reg);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
value.reg, output.reg);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp<true>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
value.reg, temp.reg, output.reg);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp<true>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
value.reg, temp.reg, output.reg);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp<false>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
value.reg, temp.reg, temp.reg);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp<false>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
@@ -2669,15 +2665,13 @@ void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
oldval, newval, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, Width::_32, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, Width::_32, sync, mem, value, output);
@@ -2697,18 +2691,18 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
value, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
AtomicFetchOp<true>(*this, nullptr, type, Width::_32, sync, op, mem, value,
temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp<true>(*this, nullptr, type, Width::_32, sync, op, mem, value,
temp, output);
}
@@ -2804,7 +2798,7 @@ void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval, Register temp,
AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -2816,15 +2810,14 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register temp,
AnyRegister output) {
@@ -2833,9 +2826,8 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register temp,
- AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
@@ -2845,14 +2837,14 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register temp,
+ AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
@@ -2860,9 +2852,9 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -2872,7 +2864,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -2880,7 +2872,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -2888,7 +2880,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp) {
AtomicFetchOp<false>(*this, nullptr, arrayType, Width::_32, sync, op, mem,
@@ -2896,7 +2888,7 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp) {
AtomicFetchOp<false>(*this, nullptr, arrayType, Width::_32, sync, op, mem,
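
The AArch64 LSE path above synthesizes the fetch forms that have no direct instruction: sub becomes LDADD of the negated operand, and and becomes LDCLR of the complemented mask. A small check of those identities with standard atomics (illustrative; the constants are arbitrary):

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint32_t> a{100};
  // fetch-sub as fetch-add of the negated value (matches Neg + LDADD).
  uint32_t before = a.fetch_add(static_cast<uint32_t>(-7));
  assert(before == 100 && a.load() == 93);

  std::atomic<uint32_t> b{0xF0F0};
  // fetch-and as "clear the complemented bits" (matches Eor ~0 + LDCLR):
  // and-with(mask) is equivalent to clear(~mask).
  uint32_t mask = 0x00FF;
  uint32_t old = b.fetch_and(mask);  // an LDCLR lowering would receive ~mask
  assert(old == 0xF0F0 && b.load() == 0x00F0);
  return 0;
}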
diff --git a/js/src/jit/loong64/Assembler-loong64.cpp b/js/src/jit/loong64/Assembler-loong64.cpp
index 6c7a5f53da..07dac546c1 100644
--- a/js/src/jit/loong64/Assembler-loong64.cpp
+++ b/js/src/jit/loong64/Assembler-loong64.cpp
@@ -103,15 +103,15 @@ uint32_t js::jit::SA3(uint32_t value) {
}
Register js::jit::toRK(Instruction& i) {
- return Register::FromCode((i.encode() & RKMask) >> RKShift);
+ return Register::FromCode(((i.encode() >> RKShift) & RKMask));
}
Register js::jit::toRJ(Instruction& i) {
- return Register::FromCode((i.encode() & RJMask) >> RJShift);
+ return Register::FromCode(((i.encode() >> RJShift) & RJMask));
}
Register js::jit::toRD(Instruction& i) {
- return Register::FromCode((i.encode() & RDMask) >> RDShift);
+ return Register::FromCode(((i.encode() >> RDShift) & RDMask));
}
Register js::jit::toR(Instruction& i) {
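
The toRK/toRJ/toRD fix above matters because these masks are defined over the low-order bits of the field (e.g. RKMask = (1 << RKBits) - 1), so the instruction word must be shifted down before masking; setRJ in the header below gets the mirrored fix on the insertion side. A generic sketch of both patterns (FieldMask/ExtractField/InsertField are illustrative helpers, not names from the tree):

#include <cstdint>

// Mask of a field's low-order bits, as in RKMask = (1 << RKBits) - 1.
constexpr uint32_t FieldMask(uint32_t bits) { return (1u << bits) - 1u; }

// Correct extraction: shift the field down first, then apply the low-order
// mask. The old "(insn & mask) >> shift" form only works when the mask is
// pre-shifted to the field's position, which these masks are not.
constexpr uint32_t ExtractField(uint32_t insn, uint32_t shift, uint32_t bits) {
  return (insn >> shift) & FieldMask(bits);
}

// Matching insertion, as in the setRJ() fix: clear the field at its shifted
// position before or-ing in the new value.
constexpr uint32_t InsertField(uint32_t insn, uint32_t shift, uint32_t bits,
                               uint32_t value) {
  uint32_t mask = FieldMask(bits);
  return (insn & ~(mask << shift)) | ((value & mask) << shift);
}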
diff --git a/js/src/jit/loong64/Assembler-loong64.h b/js/src/jit/loong64/Assembler-loong64.h
index 4e0b8d6b66..a385d71f5f 100644
--- a/js/src/jit/loong64/Assembler-loong64.h
+++ b/js/src/jit/loong64/Assembler-loong64.h
@@ -309,6 +309,7 @@ static const uint32_t Imm26Shift = 0;
static const uint32_t Imm26Bits = 26;
static const uint32_t CODEShift = 0;
static const uint32_t CODEBits = 15;
+static const uint32_t HINTBits = 5;
// LoongArch instruction field bit masks.
static const uint32_t RJMask = (1 << RJBits) - 1;
@@ -316,7 +317,9 @@ static const uint32_t RKMask = (1 << RKBits) - 1;
static const uint32_t RDMask = (1 << RDBits) - 1;
static const uint32_t SA2Mask = (1 << SA2Bits) - 1;
static const uint32_t SA3Mask = (1 << SA3Bits) - 1;
+static const uint32_t CDMask = (1 << CDBits) - 1;
static const uint32_t CONDMask = (1 << CONDBits) - 1;
+static const uint32_t HINTMask = (1 << HINTBits) - 1;
static const uint32_t LSBWMask = (1 << LSBWBits) - 1;
static const uint32_t LSBDMask = (1 << LSBDBits) - 1;
static const uint32_t MSBWMask = (1 << MSBWBits) - 1;
@@ -1611,7 +1614,7 @@ class InstReg : public Instruction {
InstReg(OpcodeField op, int32_t cond, FloatRegister fk, FloatRegister fj,
AssemblerLOONG64::FPConditionBit cd)
: Instruction(op | (cond & CONDMask) << CONDShift | FK(fk) | FJ(fj) |
- (cd & RDMask)) {
+ (cd & CDMask)) {
MOZ_ASSERT(is_uintN(cond, 5));
}
@@ -1700,7 +1703,7 @@ class InstImm : public Instruction {
}
InstImm(OpcodeField op, int32_t si12, Register rj, int32_t hint)
: Instruction(op | (si12 & Imm12Mask) << Imm12Shift | RJ(rj) |
- (hint & RDMask)) {
+ (hint & HINTMask)) {
MOZ_ASSERT(op == op_preld);
}
InstImm(OpcodeField op, int32_t msb, int32_t lsb, Register rj, Register rd,
@@ -1738,7 +1741,9 @@ class InstImm : public Instruction {
uint32_t extractRJ() {
return extractBitField(RJShift + RJBits - 1, RJShift);
}
- void setRJ(uint32_t rj) { data = (data & ~RJMask) | (rj << RJShift); }
+ void setRJ(uint32_t rj) {
+ data = (data & ~(RJMask << RJShift)) | (rj << RJShift);
+ }
uint32_t extractRD() {
return extractBitField(RDShift + RDBits - 1, RDShift);
}
diff --git a/js/src/jit/loong64/CodeGenerator-loong64.cpp b/js/src/jit/loong64/CodeGenerator-loong64.cpp
index 4c4dfd18ff..76d3047680 100644
--- a/js/src/jit/loong64/CodeGenerator-loong64.cpp
+++ b/js/src/jit/loong64/CodeGenerator-loong64.cpp
@@ -1988,10 +1988,6 @@ void CodeGenerator::visitNotF(LNotF* ins) {
Assembler::DoubleEqualOrUnordered);
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
diff --git a/js/src/jit/loong64/MacroAssembler-loong64.cpp b/js/src/jit/loong64/MacroAssembler-loong64.cpp
index 528c120058..1c07f7f91a 100644
--- a/js/src/jit/loong64/MacroAssembler-loong64.cpp
+++ b/js/src/jit/loong64/MacroAssembler-loong64.cpp
@@ -3357,7 +3357,7 @@ void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -3463,7 +3463,7 @@ static void CompareExchange(MacroAssembler& masm,
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != output && replace != output);
@@ -3499,7 +3499,7 @@ static void CompareExchange64(MacroAssembler& masm,
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register output) {
@@ -3602,7 +3602,7 @@ static void AtomicExchange(MacroAssembler& masm,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(value != output);
ScratchRegisterScope scratch(masm);
@@ -3633,10 +3633,10 @@ static void AtomicExchange64(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
ScratchRegisterScope scratch(masm);
SecondScratchRegisterScope scratch2(masm);
bool signExtend = Scalar::isSignedIntType(type);
@@ -3671,19 +3671,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_ll_w(output, scratch, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(scratch2, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(scratch2, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch2, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(scratch2, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(scratch2, output, value);
break;
default:
@@ -3718,19 +3718,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_srl_w(output, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(valueTemp, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(valueTemp, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, output, value);
break;
default:
@@ -3778,9 +3778,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
ScratchRegisterScope scratch(masm);
@@ -3801,19 +3800,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.as_ll_d(output.reg, scratch, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_d(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_d(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(temp.reg, output.reg, value.reg);
break;
default:
@@ -3826,8 +3825,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -3836,8 +3834,7 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -3846,13 +3843,13 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
@@ -3894,8 +3891,7 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -3903,8 +3899,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -3912,13 +3907,12 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
@@ -3940,43 +3934,43 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
valueTemp, offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
@@ -4003,10 +3997,9 @@ void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp) {
ScratchRegisterScope scratch(masm);
SecondScratchRegisterScope scratch2(masm);
unsigned nbytes = Scalar::byteSize(type);
@@ -4040,19 +4033,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_ll_w(scratch2, scratch, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(scratch2, scratch2, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(scratch2, scratch2, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch2, scratch2, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(scratch2, scratch2, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(scratch2, scratch2, value);
break;
default:
@@ -4087,19 +4080,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_srl_w(valueTemp, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(valueTemp, valueTemp, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(valueTemp, valueTemp, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, valueTemp, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, valueTemp, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, valueTemp, value);
break;
default:
@@ -4184,7 +4177,7 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4201,10 +4194,10 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp);
@@ -4217,8 +4210,8 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register valueTemp,
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -4232,17 +4225,17 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
offsetTemp, maskTemp, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -4252,17 +4245,16 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4272,7 +4264,7 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4282,7 +4274,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4292,7 +4284,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
@@ -4301,7 +4293,7 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
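(Note: the switch-case renames in this file and in the files below assume AtomicOp is now a scoped enum. Its definition lives in js/src/jit/AtomicOp.h, which this commit also touches but which is not shown here; a minimal sketch of the shape these hunks rely on:)

  // Sketch only -- see js/src/jit/AtomicOp.h for the real definition.
  enum class AtomicOp {
    Add,  // was AtomicFetchAddOp
    Sub,  // was AtomicFetchSubOp
    And,  // was AtomicFetchAndOp
    Or,   // was AtomicFetchOrOp
    Xor,  // was AtomicFetchXorOp
  };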
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
index 424ddab061..f7f1d7a16d 100644
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1401,10 +1401,6 @@ void CodeGenerator::visitNotF(LNotF* ins) {
Assembler::DoubleEqualOrUnordered);
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
// Ensure that there is enough space in the buffer for the OsiPoint
// patching to occur. Otherwise, we could overwrite the invalidation
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
index 052c76ba0f..284bbe0a12 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -2262,7 +2262,7 @@ void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2366,8 +2366,7 @@ static void CompareExchange(MacroAssembler& masm,
masm.bind(&end);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -2376,8 +2375,7 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -2407,7 +2405,7 @@ void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register output) {
@@ -2508,8 +2506,7 @@ static void AtomicExchange(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2517,8 +2514,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2545,10 +2541,10 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
@@ -2580,19 +2576,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_ll(output, SecondScratchReg, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(ScratchRegister, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(ScratchRegister, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(ScratchRegister, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(ScratchRegister, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(ScratchRegister, output, value);
break;
default:
@@ -2630,19 +2626,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_srlv(output, ScratchRegister, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(valueTemp, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(valueTemp, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, output, value);
break;
default:
@@ -2688,20 +2684,20 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
@@ -2727,10 +2723,9 @@ void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp) {
unsigned nbytes = Scalar::byteSize(type);
switch (nbytes) {
@@ -2761,19 +2756,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_ll(ScratchRegister, SecondScratchReg, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(ScratchRegister, ScratchRegister, value);
break;
default:
@@ -2811,19 +2806,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_srlv(valueTemp, ScratchRegister, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(valueTemp, valueTemp, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(valueTemp, valueTemp, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, valueTemp, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, valueTemp, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, valueTemp, value);
break;
default:
@@ -2875,7 +2870,7 @@ void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2891,17 +2886,17 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
offsetTemp, maskTemp, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -2912,10 +2907,10 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp);
@@ -2927,17 +2922,16 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2948,8 +2942,8 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register valueTemp,
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -2963,7 +2957,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2973,7 +2967,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2983,7 +2977,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
@@ -2992,7 +2986,7 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
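(The per-platform AtomicFetchOp/AtomicEffectOp bodies above and below all share the same load-linked/store-conditional retry loop; only the AtomicOp spelling changes in this commit. The common shape, as pseudocode rather than any one ISA:)

  // masm.memoryBarrierBefore(sync);
  // again:
  //   old = load_linked(addr)             // as_ll / as_ll_w / lr_w
  //   tmp = op(old, value)                // add/sub/and/or/xor per AtomicOp
  //   ok  = store_conditional(addr, tmp)  // as_sc / as_sc_w / sc_w
  //   if (!ok) goto again
  // masm.memoryBarrierAfter(sync);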
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
index f4b3d557a5..747db53799 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -2745,27 +2745,27 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.load64(Address(SecondScratchReg, 0), output);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(temp.low, output.low, value.low);
masm.as_sltu(temp.high, temp.low, output.low);
masm.as_addu(temp.high, temp.high, output.high);
masm.as_addu(temp.high, temp.high, value.high);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sltu(temp.high, output.low, value.low);
masm.as_subu(temp.high, output.high, temp.high);
masm.as_subu(temp.low, output.low, value.low);
masm.as_subu(temp.high, temp.high, value.high);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.low, output.low, value.low);
masm.as_and(temp.high, output.high, value.high);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(temp.low, output.low, value.low);
masm.as_or(temp.high, output.high, value.high);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(temp.low, output.low, value.low);
masm.as_xor(temp.high, output.high, value.high);
break;
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
index cbf66ccac4..1530bfcbc8 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -2611,7 +2611,7 @@ void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != output && replace != output);
@@ -2658,13 +2658,13 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
@@ -2673,7 +2673,7 @@ void MacroAssembler::compareExchange64(const Synchronization& sync,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(value != output);
masm.computeEffectiveAddress(mem, SecondScratchReg);
@@ -2717,13 +2717,12 @@ void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
WasmAtomicExchange64(*this, access, mem, src, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
@@ -2732,9 +2731,8 @@ void MacroAssembler::atomicExchange64(const Synchronization& sync,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
masm.computeEffectiveAddress(mem, SecondScratchReg);
@@ -2751,19 +2749,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.as_lld(output.reg, SecondScratchReg, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_daddu(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_dsubu(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(temp.reg, output.reg, value.reg);
break;
default:
@@ -2790,25 +2788,25 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
diff --git a/js/src/jit/riscv64/CodeGenerator-riscv64.cpp b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
index 1c890799ed..3cfb91a036 100644
--- a/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
+++ b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
@@ -1986,10 +1986,6 @@ void CodeGenerator::visitNotF(LNotF* ins) {
masm.ma_compareF32(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64.cpp b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
index 93ccf1cc27..dc5721ea9f 100644
--- a/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
@@ -2243,7 +2243,7 @@ uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register output) {
@@ -2352,7 +2352,7 @@ static void AtomicExchange(MacroAssembler& masm,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(value != output);
UseScratchRegisterScope temps(&masm);
@@ -2382,9 +2382,8 @@ static void AtomicExchange64(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
UseScratchRegisterScope temps(&masm);
@@ -2405,19 +2404,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.lr_d(true, true, output.reg, SecondScratchReg);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.add(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.sub(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(temp.reg, output.reg, value.reg);
break;
default:
@@ -2433,10 +2432,9 @@ static void AtomicFetchOp64(MacroAssembler& masm,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp) {
ScratchRegisterScope scratch(masm);
UseScratchRegisterScope temps(&masm);
Register scratch2 = temps.Acquire();
@@ -2471,19 +2469,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.lr_w(true, true, scratch2, scratch);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(scratch2, scratch2, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(scratch2, scratch2, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(scratch2, scratch2, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(scratch2, scratch2, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(scratch2, scratch2, value);
break;
default:
@@ -2519,19 +2517,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.srlw(valueTemp, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(valueTemp, valueTemp, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(valueTemp, valueTemp, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(valueTemp, valueTemp, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(valueTemp, valueTemp, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(valueTemp, valueTemp, value);
break;
default:
@@ -2563,10 +2561,10 @@ static void AtomicEffectOp(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
ScratchRegisterScope scratch(masm);
UseScratchRegisterScope temps(&masm);
Register scratch2 = temps.Acquire();
@@ -2602,19 +2600,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.lr_w(true, true, output, scratch);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(scratch2, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(scratch2, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(scratch2, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(scratch2, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(scratch2, output, value);
break;
default:
@@ -2650,19 +2648,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.srlw(output, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(valueTemp, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(valueTemp, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(valueTemp, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(valueTemp, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(valueTemp, output, value);
break;
default:
@@ -2715,7 +2713,7 @@ static void AtomicFetchOp(MacroAssembler& masm,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2732,10 +2730,10 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp);
@@ -2748,8 +2746,8 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register valueTemp,
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -2763,7 +2761,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
@@ -2772,37 +2770,35 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2811,8 +2807,7 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
maskTemp, temp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2820,8 +2815,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2830,7 +2824,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2840,7 +2834,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2849,20 +2843,20 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
maskTemp, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
@@ -3058,7 +3052,7 @@ void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != output && replace != output);
@@ -3092,30 +3086,30 @@ static void CompareExchange64(MacroAssembler& masm,
masm.bind(&exit);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
offsetTemp, maskTemp, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -3947,25 +3941,25 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
@@ -4034,7 +4028,7 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -4140,8 +4134,7 @@ static void CompareExchange(MacroAssembler& masm,
masm.bind(&end);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -4150,8 +4143,7 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index 8abf68504b..6cdf76981b 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -552,7 +552,7 @@ class MemoryAccessDesc {
explicit MemoryAccessDesc(
uint32_t memoryIndex, Scalar::Type type, uint32_t align, uint64_t offset,
BytecodeOffset trapOffset, mozilla::DebugOnly<bool> hugeMemory,
- const jit::Synchronization& sync = jit::Synchronization::None())
+ jit::Synchronization sync = jit::Synchronization::None())
: memoryIndex_(memoryIndex),
offset64_(offset),
align_(align),
@@ -592,7 +592,7 @@ class MemoryAccessDesc {
uint32_t align() const { return align_; }
Scalar::Type type() const { return type_; }
unsigned byteSize() const { return Scalar::byteSize(type()); }
- const jit::Synchronization& sync() const { return sync_; }
+ jit::Synchronization sync() const { return sync_; }
BytecodeOffset trapOffset() const { return trapOffset_; }
wasm::SimdOp widenSimdOp() const {
MOZ_ASSERT(isWidenSimd128Load());
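(Synchronization is passed by value here and throughout the commit. That is presumably cheap because it is a small, trivially copyable pair of barrier bit sets; a sketch of its shape, with the real definition in js/src/jit/AtomicOp.h:)

  // Sketch only -- see js/src/jit/AtomicOp.h.
  struct Synchronization {
    MemoryBarrierBits barrierBefore;
    MemoryBarrierBits barrierAfter;
    // plus factory helpers such as Synchronization::None() and ::Full()
  };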
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index 1a838f78c3..d8b5693d85 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -2185,25 +2185,28 @@ class LLoadDataViewElement : public LInstructionHelper<1, 3, 1 + INT64_PIECES> {
};
class LLoadTypedArrayElementHoleBigInt
- : public LInstructionHelper<BOX_PIECES, 2, 1 + INT64_PIECES> {
+ : public LInstructionHelper<BOX_PIECES, 3, 1 + INT64_PIECES> {
public:
LIR_HEADER(LoadTypedArrayElementHoleBigInt)
- LLoadTypedArrayElementHoleBigInt(const LAllocation& object,
+ LLoadTypedArrayElementHoleBigInt(const LAllocation& elements,
const LAllocation& index,
+ const LAllocation& length,
const LDefinition& temp,
const LInt64Definition& temp64)
: LInstructionHelper(classOpcode) {
- setOperand(0, object);
+ setOperand(0, elements);
setOperand(1, index);
+ setOperand(2, length);
setTemp(0, temp);
setInt64Temp(1, temp64);
}
const MLoadTypedArrayElementHole* mir() const {
return mir_->toLoadTypedArrayElementHole();
}
- const LAllocation* object() { return getOperand(0); }
+ const LAllocation* elements() { return getOperand(0); }
const LAllocation* index() { return getOperand(1); }
+ const LAllocation* length() { return getOperand(2); }
const LDefinition* temp() { return getTemp(0); }
const LInt64Definition temp64() { return getInt64Temp(1); }
};
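(The new length operand lets the code generator perform the hole check against the elements/index pair directly instead of re-deriving the length from the typed-array object. Conceptually, and only as a sketch of intent:)

  // if (index >= length)  -> box |undefined| (the "hole" result)
  // else                  -> load the 64-bit element and box it as a BigInt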
diff --git a/js/src/jit/wasm32/CodeGenerator-wasm32.cpp b/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
index 923297a0c1..4c27637203 100644
--- a/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
+++ b/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
@@ -175,7 +175,6 @@ void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
}
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) { MOZ_CRASH(); }
void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) { MOZ_CRASH(); }
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) { MOZ_CRASH(); }
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH(); }
void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
MOZ_CRASH();
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
index 9e5319842b..86d4bca0e0 100644
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -432,7 +432,7 @@ void CodeGenerator::visitAtomicTypedArrayElementBinop64(
// Add and Sub don't need |fetchTemp| and can save a `mov` when the value and
// output register are equal to each other.
- if (atomicOp == AtomicFetchAddOp || atomicOp == AtomicFetchSubOp) {
+ if (atomicOp == AtomicOp::Add || atomicOp == AtomicOp::Sub) {
fetchTemp = Register64::Invalid();
fetchOut = temp1;
createTemp = temp2.reg;
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
index 55d83e3f05..9f9b1713c2 100644
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -208,8 +208,8 @@ void LIRGenerator::visitAtomicTypedArrayElementBinop(
//
// For AND/OR/XOR we need to use a CMPXCHG loop with rax as a temp register.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp = !(ins->operation() == AtomicOp::Add ||
+ ins->operation() == AtomicOp::Sub);
LInt64Definition temp1 = tempInt64();
LInt64Definition temp2;
@@ -427,8 +427,8 @@ void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
// *mem does not have the expected value, so reloading it at the
// top of the loop would be redundant.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
bool reuseInput = false;
LAllocation value;
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
index 5106e7e382..ebc8c91eaa 100644
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -1459,7 +1459,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
Register output) {
// NOTE: the generated code must match the assembly code in gen_fetchop in
// GenerateAtomicOperations.py
- if (op == AtomicFetchAddOp) {
+ if (op == AtomicOp::Add) {
if (value != output) {
masm.movq(value, output);
}
@@ -1468,7 +1468,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
FaultingCodeOffset(masm.currentOffset()));
}
masm.lock_xaddq(output, Operand(mem));
- } else if (op == AtomicFetchSubOp) {
+ } else if (op == AtomicOp::Sub) {
if (value != output) {
masm.movq(value, output);
}
@@ -1492,13 +1492,13 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.bind(&again);
masm.movq(rax, temp);
switch (op) {
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.andq(value, temp);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.orq(value, temp);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xorq(value, temp);
break;
default:
@@ -1532,19 +1532,19 @@ static void AtomicEffectOp64(MacroAssembler& masm,
FaultingCodeOffset(masm.currentOffset()));
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addq(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subq(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andq(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orq(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorq(value, Operand(mem));
break;
default:
@@ -1558,8 +1558,8 @@ void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
AtomicEffectOp64(*this, &access, op, value.reg, mem);
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const Address& mem, Register64 expected,
+void MacroAssembler::compareExchange64(Synchronization, const Address& mem,
+ Register64 expected,
Register64 replacement,
Register64 output) {
// NOTE: the generated code must match the assembly code in gen_cmpxchg in
@@ -1571,8 +1571,7 @@ void MacroAssembler::compareExchange64(const Synchronization&,
lock_cmpxchgq(replacement.reg, Operand(mem));
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const BaseIndex& mem,
+void MacroAssembler::compareExchange64(Synchronization, const BaseIndex& mem,
Register64 expected,
Register64 replacement,
Register64 output) {
@@ -1583,9 +1582,8 @@ void MacroAssembler::compareExchange64(const Synchronization&,
lock_cmpxchgq(replacement.reg, Operand(mem));
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const Address& mem,
+ Register64 value, Register64 output) {
// NOTE: the generated code must match the assembly code in gen_exchange in
// GenerateAtomicOperations.py
if (value != output) {
@@ -1594,33 +1592,32 @@ void MacroAssembler::atomicExchange64(const Synchronization&,
xchgq(output.reg, Operand(mem));
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const BaseIndex& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const BaseIndex& mem,
+ Register64 value, Register64 output) {
if (value != output) {
movq(value.reg, output.reg);
}
xchgq(output.reg, Operand(mem));
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem) {
AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem) {
AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
index 434a54669b..692e884f06 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -2078,12 +2078,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
}
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- if (ins->type() & MembarStoreLoad) {
- masm.storeLoadFence();
- }
-}
-
void CodeGeneratorX86Shared::visitOutOfLineWasmTruncateCheck(
OutOfLineWasmTruncateCheck* ool) {
FloatRegister input = ool->input();
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
index bd5986298d..6d90f2f96b 100644
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -732,8 +732,8 @@ void LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(
// There are optimization opportunities:
// - better register allocation in the x86 8-bit case, Bug #1077036.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
bool fixedOutput = true;
bool reuseInput = false;
LDefinition tempDef1 = LDefinition::BogusTemp();
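(In the x86/x64 lowering hunks, bitOp is simply "every operation except Add and Sub": those two map onto LOCK XADD, while And/Or/Xor need a CMPXCHG loop and extra temps, as the comments above note. The same predicate written out as a sketch:)

  // Sketch: true when the op needs the CMPXCHG loop on x86/x64.
  static bool NeedsCmpxchgLoop(AtomicOp op) {
    switch (op) {
      case AtomicOp::Add:
      case AtomicOp::Sub:
        return false;  // LOCK XADD (Sub negates the value first)
      default:
        return true;   // And, Or, Xor
    }
  }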
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
index 9848086e7f..8ce3f68224 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -1246,7 +1246,8 @@ template FaultingCodeOffset MacroAssembler::storeFloat32(FloatRegister src,
void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
if (barrier & MembarStoreLoad) {
- storeLoadFence();
+ // This implementation follows Linux.
+ masm.mfence();
}
}
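(Only the MembarStoreLoad bit needs a real fence in this function: x86's TSO memory model already forbids load-load, load-store and store-store reordering, so those bits emit nothing. Summarizing the hunk above:)

  // MembarLoadLoad / MembarLoadStore / MembarStoreStore: no-op on x86 (TSO)
  // MembarStoreLoad: needs an MFENCE (or an equivalent locked RMW)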
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
index e474f83530..1520321260 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -1143,13 +1143,13 @@ static void CompareExchange(MacroAssembler& masm,
ExtendTo32(masm, type, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization,
const Address& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization,
const BaseIndex& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
@@ -1201,13 +1201,13 @@ static void AtomicExchange(MacroAssembler& masm,
ExtendTo32(masm, type, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization,
const Address& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, mem, value, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization,
const BaseIndex& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, mem, value, output);
@@ -1227,7 +1227,7 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
static void SetupValue(MacroAssembler& masm, AtomicOp op, Imm32 src,
Register output) {
- if (op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Sub) {
masm.movl(Imm32(-src.value), output);
} else {
masm.movl(src, output);
@@ -1239,7 +1239,7 @@ static void SetupValue(MacroAssembler& masm, AtomicOp op, Register src,
if (src != output) {
masm.movl(src, output);
}
- if (op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Sub) {
masm.negl(output);
}
}
@@ -1269,15 +1269,14 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.j(MacroAssembler::NonZero, &again); \
} while (0)
- MOZ_ASSERT_IF(op == AtomicFetchAddOp || op == AtomicFetchSubOp,
- temp == InvalidReg);
+ MOZ_ASSERT_IF(op == AtomicOp::Add || op == AtomicOp::Sub, temp == InvalidReg);
switch (Scalar::byteSize(arrayType)) {
case 1:
CheckBytereg(output);
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
CheckBytereg(value); // But not for the bitwise ops
SetupValue(masm, op, value, output);
if (access) {
@@ -1286,17 +1285,17 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddb(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, andl,
lock_cmpxchgb);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, orl,
lock_cmpxchgb);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, xorl,
lock_cmpxchgb);
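
The ATOMIC_BITOP_BODY expansions above implement fetch-and/or/xor as a plain load followed by a lock cmpxchg retry loop, since x86 has no single instruction that both applies a bitwise op and returns the old value. The loop has the same shape as this std::atomic sketch (illustrative only; names are not from the tree):

    #include <atomic>
    #include <cstdint>

    uint32_t fetchAndLoop(std::atomic<uint32_t>& mem, uint32_t value) {
      uint32_t old = mem.load(std::memory_order_relaxed);
      // compare_exchange_weak reloads 'old' on failure, so the loop retries
      // until the bitwise update lands without interference.
      while (!mem.compare_exchange_weak(old, old & value,
                                        std::memory_order_seq_cst)) {
      }
      return old;
    }
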
@@ -1307,8 +1306,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
break;
case 2:
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
SetupValue(masm, op, value, output);
if (access) {
masm.append(*access, wasm::TrapMachineInsn::Atomic,
@@ -1316,15 +1315,15 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddw(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, andl,
lock_cmpxchgw);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, orl,
lock_cmpxchgw);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, xorl,
lock_cmpxchgw);
break;
@@ -1334,8 +1333,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
break;
case 4:
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
SetupValue(masm, op, value, output);
if (access) {
masm.append(*access, wasm::TrapMachineInsn::Atomic,
@@ -1343,15 +1342,15 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddl(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, andl,
lock_cmpxchgl);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, orl,
lock_cmpxchgl);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, xorl,
lock_cmpxchgl);
break;
@@ -1367,30 +1366,29 @@ static void AtomicFetchOp(MacroAssembler& masm,
#undef ATOMIC_BITOP_BODY
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const Address& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const Address& mem,
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value, const Address& mem,
Register temp, Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
@@ -1436,19 +1434,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
switch (Scalar::byteSize(arrayType)) {
case 1:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addb(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subb(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andb(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orb(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorb(value, Operand(mem));
break;
default:
@@ -1457,19 +1455,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
break;
case 2:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addw(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subw(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andw(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orw(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorw(value, Operand(mem));
break;
default:
@@ -1478,19 +1476,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
break;
case 4:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addl(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subl(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andl(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orl(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorl(value, Operand(mem));
break;
default:
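
AtomicEffectOp, by contrast, never produces the old value, so every operation maps to a single lock-prefixed read-modify-write (lock add/sub/and/or/xor) with no xadd or cmpxchg loop. The analogous pattern in standard C++ is a fetch op whose result is discarded, which compilers commonly lower to the same single locked instruction (sketch, not from the tree):

    #include <atomic>
    #include <cstdint>

    void atomicOrEffect(std::atomic<uint32_t>& mem, uint32_t v) {
      // Result unused: on x86 this typically becomes a single lock orl.
      mem.fetch_or(v, std::memory_order_seq_cst);
    }
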
@@ -1535,7 +1533,7 @@ void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval, Register temp,
AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -1547,15 +1545,14 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register temp,
AnyRegister output) {
@@ -1564,9 +1561,8 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register temp,
- AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
@@ -1576,14 +1572,14 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register temp,
+ AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
@@ -1591,9 +1587,9 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
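
The Scalar::Uint32 branches here convert the raw result to a double before handing it back, because a uint32 old value can exceed INT32_MAX and so may not be representable as an int32-tagged value. The conversion is just a numeric widening (trivial sketch for clarity):

    #include <cstdint>

    double uint32ResultToJSNumber(uint32_t oldValue) {
      return static_cast<double>(oldValue);  // e.g. 0xFFFFFFFFu -> 4294967295.0
    }
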
@@ -1603,7 +1599,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -1611,39 +1607,36 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const Address& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const Address& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value,
+ const Address& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const BaseIndex& mem,
Register temp) {
MOZ_ASSERT(temp == InvalidReg);
@@ -1652,9 +1645,9 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Imm32 value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Imm32 value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -1664,7 +1657,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -1672,7 +1665,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
index dd1ae53537..21af90e90d 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -161,15 +161,6 @@ class MacroAssemblerX86Shared : public Assembler {
void atomic_inc32(const Operand& addr) { lock_incl(addr); }
void atomic_dec32(const Operand& addr) { lock_decl(addr); }
- void storeLoadFence() {
- // This implementation follows Linux.
- if (HasSSE2()) {
- masm.mfence();
- } else {
- lock_addl(Imm32(0), Operand(Address(esp, 0)));
- }
- }
-
void branch16(Condition cond, Register lhs, Register rhs, Label* label) {
cmpw(rhs, lhs);
j(cond, label);
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
index 0577a0976e..e958e998c2 100644
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -635,8 +635,8 @@ void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
// - better 8-bit register allocation and instruction selection, Bug
// #1077036.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
LDefinition tempDef = LDefinition::BogusTemp();
LAllocation value;
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
index a68d7b03b7..232303b429 100644
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -1423,19 +1423,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
} while (0)
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
ATOMIC_OP_BODY(add64FromMemory);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
ATOMIC_OP_BODY(sub64FromMemory);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_OP_BODY(and64FromMemory);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_OP_BODY(or64FromMemory);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_OP_BODY(xor64FromMemory);
break;
default:
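
AtomicFetchOp64 in the 32-bit x86 backend cannot use a 64-bit xadd, so each operation goes through an ATOMIC_OP_BODY retry loop (on this target, typically built around lock cmpxchg8b). The equivalent shape in portable C++ (illustrative; the identifiers are not from the tree):

    #include <atomic>
    #include <cstdint>

    uint64_t fetchAdd64(std::atomic<uint64_t>& mem, uint64_t v) {
      uint64_t old = mem.load(std::memory_order_relaxed);
      // Retry the 64-bit compare-exchange until the update succeeds.
      while (!mem.compare_exchange_weak(old, old + v,
                                        std::memory_order_seq_cst)) {
      }
      return old;
    }
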
@@ -1626,60 +1626,57 @@ void MacroAssembler::wasmTruncateFloat32ToUInt64(
// ========================================================================
// Primitive atomic operations.
-void MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem,
+void MacroAssembler::atomicLoad64(Synchronization, const Address& mem,
Register64 temp, Register64 output) {
AtomicLoad64(*this, nullptr, mem, temp, output);
}
-void MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem,
+void MacroAssembler::atomicLoad64(Synchronization, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicLoad64(*this, nullptr, mem, temp, output);
}
-void MacroAssembler::atomicStore64(const Synchronization&, const Address& mem,
+void MacroAssembler::atomicStore64(Synchronization, const Address& mem,
Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, mem, value, temp);
}
-void MacroAssembler::atomicStore64(const Synchronization&, const BaseIndex& mem,
+void MacroAssembler::atomicStore64(Synchronization, const BaseIndex& mem,
Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, mem, value, temp);
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const Address& mem, Register64 expected,
+void MacroAssembler::compareExchange64(Synchronization, const Address& mem,
+ Register64 expected,
Register64 replacement,
Register64 output) {
CompareExchange64(*this, nullptr, mem, expected, replacement, output);
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const BaseIndex& mem,
+void MacroAssembler::compareExchange64(Synchronization, const BaseIndex& mem,
Register64 expected,
Register64 replacement,
Register64 output) {
CompareExchange64(*this, nullptr, mem, expected, replacement, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const BaseIndex& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const BaseIndex& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, mem, value, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization, AtomicOp op,
const Address& value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization, AtomicOp op,
const Address& value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);