Diffstat (limited to 'js/src/jit')
-rw-r--r--  js/src/jit/ABIFunctionList-inl.h | 17
-rw-r--r--  js/src/jit/Bailouts.cpp | 2
-rw-r--r--  js/src/jit/BaselineCacheIRCompiler.cpp | 14
-rw-r--r--  js/src/jit/BaselineCodeGen.cpp | 12
-rw-r--r--  js/src/jit/BaselineDebugModeOSR.cpp | 3
-rw-r--r--  js/src/jit/BaselineIC.cpp | 45
-rw-r--r--  js/src/jit/BaselineIC.h | 4
-rw-r--r--  js/src/jit/BaselineICList.h | 1
-rw-r--r--  js/src/jit/BranchHinting.cpp | 61
-rw-r--r--  js/src/jit/BranchHinting.h | 21
-rw-r--r--  js/src/jit/CacheIR.cpp | 93
-rw-r--r--  js/src/jit/CacheIR.h | 1
-rw-r--r--  js/src/jit/CacheIRCompiler.cpp | 85
-rw-r--r--  js/src/jit/CacheIRCompiler.h | 3
-rw-r--r--  js/src/jit/CacheIRGenerator.h | 17
-rw-r--r--  js/src/jit/CacheIROps.yaml | 9
-rw-r--r--  js/src/jit/CacheIRReader.h | 4
-rw-r--r--  js/src/jit/CacheIRSpewer.cpp | 16
-rw-r--r--  js/src/jit/CacheIRSpewer.h | 5
-rw-r--r--  js/src/jit/CacheIRWriter.h | 4
-rw-r--r--  js/src/jit/CodeGenerator.cpp | 768
-rw-r--r--  js/src/jit/CodeGenerator.h | 12
-rw-r--r--  js/src/jit/CompileInfo.h | 9
-rw-r--r--  js/src/jit/GenerateCacheIRFiles.py | 8
-rw-r--r--  js/src/jit/InterpreterEntryTrampoline.cpp | 13
-rw-r--r--  js/src/jit/InterpreterEntryTrampoline.h | 2
-rw-r--r--  js/src/jit/Ion.cpp | 14
-rw-r--r--  js/src/jit/IonCacheIRCompiler.cpp | 2
-rw-r--r--  js/src/jit/IonIC.cpp | 3
-rw-r--r--  js/src/jit/IonTypes.h | 3
-rw-r--r--  js/src/jit/JitFrames.cpp | 19
-rw-r--r--  js/src/jit/JitRuntime.h | 4
-rw-r--r--  js/src/jit/JitScript.cpp | 20
-rw-r--r--  js/src/jit/JitSpewer.cpp | 3
-rw-r--r--  js/src/jit/JitSpewer.h | 2
-rw-r--r--  js/src/jit/LIR.h | 2
-rw-r--r--  js/src/jit/LIROps.yaml | 19
-rw-r--r--  js/src/jit/Lowering.cpp | 79
-rw-r--r--  js/src/jit/MIR.cpp | 87
-rw-r--r--  js/src/jit/MIR.h | 17
-rw-r--r--  js/src/jit/MIRGenerator.h | 1
-rw-r--r--  js/src/jit/MIRGraph.cpp | 4
-rw-r--r--  js/src/jit/MIRGraph.h | 12
-rw-r--r--  js/src/jit/MIROps.yaml | 19
-rw-r--r--  js/src/jit/MacroAssembler-inl.h | 12
-rw-r--r--  js/src/jit/MacroAssembler.cpp | 60
-rw-r--r--  js/src/jit/MacroAssembler.h | 10
-rw-r--r--  js/src/jit/RangeAnalysis.cpp | 2
-rw-r--r--  js/src/jit/TrampolineNatives.cpp | 44
-rw-r--r--  js/src/jit/TrampolineNatives.h | 4
-rw-r--r--  js/src/jit/TypePolicy.cpp | 181
-rw-r--r--  js/src/jit/VMFunctionList-inl.h | 2
-rw-r--r--  js/src/jit/VMFunctions.cpp | 43
-rw-r--r--  js/src/jit/VMFunctions.h | 7
-rw-r--r--  js/src/jit/WarpBuilder.cpp | 43
-rw-r--r--  js/src/jit/WarpCacheIRTranspiler.cpp | 25
-rw-r--r--  js/src/jit/WarpOracle.cpp | 40
-rw-r--r--  js/src/jit/arm64/MacroAssembler-arm64.cpp | 2
-rw-r--r--  js/src/jit/loong64/CodeGenerator-loong64.cpp | 10
-rw-r--r--  js/src/jit/moz.build | 1
-rw-r--r--  js/src/jit/shared/AtomicOperations-shared-jit.h | 16
-rw-r--r--  js/src/jit/shared/LIR-shared.h | 98
-rw-r--r--  js/src/jit/x64/CodeGenerator-x64.cpp | 1
-rw-r--r--  js/src/jit/x64/Lowering-x64.cpp | 1
-rw-r--r--  js/src/jit/x64/MacroAssembler-x64.cpp | 3
-rw-r--r--  js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp | 2
-rw-r--r--  js/src/jit/x86-shared/Lowering-x86-shared.cpp | 1
-rw-r--r--  js/src/jit/x86/Lowering-x86.cpp | 1
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86.cpp | 3
69 files changed, 1765 insertions(+), 386 deletions(-)
diff --git a/js/src/jit/ABIFunctionList-inl.h b/js/src/jit/ABIFunctionList-inl.h
index eb2123f7a2..a1a992e77c 100644
--- a/js/src/jit/ABIFunctionList-inl.h
+++ b/js/src/jit/ABIFunctionList-inl.h
@@ -21,6 +21,7 @@
#include "builtin/Object.h" // js::ObjectClassToString
#include "builtin/RegExp.h" // js::RegExpPrototypeOptimizableRaw,
// js::RegExpInstanceOptimizableRaw
+#include "builtin/Sorting.h" // js::ArraySortData
#include "builtin/TestingFunctions.h" // js::FuzzilliHash*
#include "irregexp/RegExpAPI.h"
@@ -45,11 +46,12 @@
#include "proxy/Proxy.h" // js::ProxyGetProperty
-#include "vm/ArgumentsObject.h" // js::ArgumentsObject::finishForIonPure
-#include "vm/Interpreter.h" // js::TypeOfObject
-#include "vm/NativeObject.h" // js::NativeObject
-#include "vm/RegExpShared.h" // js::ExecuteRegExpAtomRaw
-#include "wasm/WasmBuiltins.h" // js::wasm::*
+#include "vm/ArgumentsObject.h" // js::ArgumentsObject::finishForIonPure
+#include "vm/Interpreter.h" // js::TypeOfObject
+#include "vm/NativeObject.h" // js::NativeObject
+#include "vm/RegExpShared.h" // js::ExecuteRegExpAtomRaw
+#include "vm/TypedArrayObject.h" // js::TypedArraySortFromJit
+#include "wasm/WasmBuiltins.h" // js::wasm::*
#include "builtin/Boolean-inl.h" // js::EmulatesUndefined
@@ -103,7 +105,8 @@ namespace jit {
_(js::ArgumentsObject::finishForIonPure) \
_(js::ArgumentsObject::finishInlineForIonPure) \
_(js::ArrayShiftMoveElements) \
- _(js::ArraySortData::sortWithComparator) \
+ _(js::ArraySortData::sortArrayWithComparator) \
+ _(js::ArraySortData::sortTypedArrayWithComparator) \
_(js::ArraySortFromJit) \
_(js::ecmaAtan2) \
_(js::ecmaHypot) \
@@ -175,6 +178,7 @@ namespace jit {
_(js::jit::StringTrimEndIndex) \
_(js::jit::StringTrimStartIndex) \
_(js::jit::TypeOfNameObject) \
+ _(js::jit::TypeOfEqObject) \
_(js::jit::WrapObjectPure) \
ABIFUNCTION_FUZZILLI_LIST(_) \
_(js::MapIteratorObject::next) \
@@ -189,6 +193,7 @@ namespace jit {
_(js::RegExpPrototypeOptimizableRaw) \
_(js::SetIteratorObject::next) \
_(js::StringToNumberPure) \
+ _(js::TypedArraySortFromJit) \
_(js::TypeOfObject) \
_(mozilla::SIMD::memchr16) \
_(mozilla::SIMD::memchr2x16) \
diff --git a/js/src/jit/Bailouts.cpp b/js/src/jit/Bailouts.cpp
index 3730d8997a..1d2657c399 100644
--- a/js/src/jit/Bailouts.cpp
+++ b/js/src/jit/Bailouts.cpp
@@ -54,9 +54,11 @@ class js::jit::BailoutStack {
# pragma pack(pop)
#endif
+#if !defined(JS_CODEGEN_NONE)
// Make sure the compiler doesn't add extra padding on 32-bit platforms.
static_assert((sizeof(BailoutStack) % 8) == 0,
"BailoutStack should be 8-byte aligned.");
+#endif
BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
BailoutStack* bailout)
diff --git a/js/src/jit/BaselineCacheIRCompiler.cpp b/js/src/jit/BaselineCacheIRCompiler.cpp
index 92490ef8b8..29cada7037 100644
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -427,7 +427,7 @@ bool BaselineCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
Address atomAddr(stubAddress(expectedOffset));
- Label done;
+ Label done, notCachedAtom;
masm.branchPtr(Assembler::Equal, atomAddr, str, &done);
// The pointers are not equal, so if the input string is also an atom it
@@ -435,6 +435,11 @@ bool BaselineCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), failure->label());
+ masm.tryFastAtomize(str, scratch, scratch, &notCachedAtom);
+ masm.branchPtr(Assembler::Equal, atomAddr, scratch, &done);
+ masm.jump(failure->label());
+ masm.bind(&notCachedAtom);
+
// Check the length.
masm.loadPtr(atomAddr, scratch);
masm.loadStringLength(scratch, scratch);
@@ -1464,9 +1469,13 @@ bool BaselineCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
void BaselineCacheIRCompiler::emitAtomizeString(Register str, Register temp,
Label* failure) {
- Label isAtom;
+ Label isAtom, notCachedAtom;
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &isAtom);
+ masm.tryFastAtomize(str, temp, str, &notCachedAtom);
+ masm.jump(&isAtom);
+ masm.bind(&notCachedAtom);
+
{
LiveRegisterSet save(GeneralRegisterSet::Volatile(),
liveVolatileFloatRegs());
@@ -2032,6 +2041,7 @@ bool BaselineCacheIRCompiler::init(CacheKind kind) {
break;
case CacheKind::GetProp:
case CacheKind::TypeOf:
+ case CacheKind::TypeOfEq:
case CacheKind::ToPropertyKey:
case CacheKind::GetIterator:
case CacheKind::OptimizeSpreadCall:
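The two hunks above share one new pattern: before falling back to the slow path, the compiler now asks the runtime's atom cache for an already-atomized version of the input string via masm.tryFastAtomize. A rough C++ rendering of the guard logic being emitted by emitGuardSpecificAtom, with hypothetical helpers standing in for the emitted branches (not the real masm stream):

    // Sketch only: tryFastAtomize() and compareLengthAndChars() are
    // illustrative names for the emitted branches and the existing slow path.
    bool guardSpecificAtom(JSString* str, JSAtom* expected) {
      if (str == expected) return true;   // same pointer: guard holds
      if (str->isAtom()) return false;    // two distinct atoms never match
      if (JSAtom* cached = tryFastAtomize(str)) {
        return cached == expected;        // cached atom decides the guard
      }
      return compareLengthAndChars(str, expected);  // pre-existing slow path
    }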
diff --git a/js/src/jit/BaselineCodeGen.cpp b/js/src/jit/BaselineCodeGen.cpp
index 8ab65458ea..ae930a40eb 100644
--- a/js/src/jit/BaselineCodeGen.cpp
+++ b/js/src/jit/BaselineCodeGen.cpp
@@ -4544,6 +4544,18 @@ bool BaselineCodeGen<Handler>::emit_TypeofExpr() {
}
template <typename Handler>
+bool BaselineCodeGen<Handler>::emit_TypeofEq() {
+ frame.popRegsAndSync(1);
+
+ if (!emitNextIC()) {
+ return false;
+ }
+
+ frame.push(R0);
+ return true;
+}
+
+template <typename Handler>
bool BaselineCodeGen<Handler>::emit_ThrowMsg() {
prepareVMCall();
pushUint8BytecodeOperandArg(R2.scratchReg());
diff --git a/js/src/jit/BaselineDebugModeOSR.cpp b/js/src/jit/BaselineDebugModeOSR.cpp
index 37b01fe93a..9a3b67ff4e 100644
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -475,8 +475,9 @@ static void UndoRecompileBaselineScriptsForDebugMode(
for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
const DebugModeOSREntry& entry = iter.entry();
JSScript* script = entry.script;
- BaselineScript* baselineScript = script->baselineScript();
if (entry.recompiled()) {
+ BaselineScript* baselineScript =
+ script->jitScript()->clearBaselineScript(cx->gcContext(), script);
script->jitScript()->setBaselineScript(script, entry.oldBaselineScript);
BaselineScript::Destroy(cx->gcContext(), baselineScript);
}
diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
index 705dcb8e46..88ff52e16e 100644
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -32,6 +32,7 @@
#include "vm/JSFunction.h"
#include "vm/JSScript.h"
#include "vm/Opcodes.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
#endif
@@ -356,6 +357,8 @@ class MOZ_STATIC_CLASS OpToFallbackKindTable {
setKind(JSOp::Typeof, BaselineICFallbackKind::TypeOf);
setKind(JSOp::TypeofExpr, BaselineICFallbackKind::TypeOf);
+ setKind(JSOp::TypeofEq, BaselineICFallbackKind::TypeOfEq);
+
setKind(JSOp::ToPropertyKey, BaselineICFallbackKind::ToPropertyKey);
setKind(JSOp::Iter, BaselineICFallbackKind::GetIterator);
@@ -429,6 +432,7 @@ bool ICSupportsPolymorphicTypeData(JSOp op) {
switch (kind) {
case BaselineICFallbackKind::ToBool:
case BaselineICFallbackKind::TypeOf:
+ case BaselineICFallbackKind::TypeOfEq:
return true;
default:
return false;
@@ -1143,7 +1147,7 @@ bool DoGetNameFallback(JSContext* cx, BaselineFrame* frame,
static_assert(JSOpLength_GetGName == JSOpLength_GetName,
"Otherwise our check for JSOp::Typeof isn't ok");
- if (JSOp(pc[JSOpLength_GetGName]) == JSOp::Typeof) {
+ if (IsTypeOfNameOp(JSOp(pc[JSOpLength_GetGName]))) {
if (!GetEnvironmentName<GetNameMode::TypeOf>(cx, envChain, name, res)) {
return false;
}
@@ -2056,6 +2060,45 @@ bool FallbackICCodeCompiler::emit_TypeOf() {
}
//
+// TypeOfEq_Fallback
+//
+
+bool DoTypeOfEqFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue val,
+ MutableHandleValue res) {
+ stub->incrementEnteredCount();
+ MaybeNotifyWarp(frame->outerScript(), stub);
+ FallbackICSpew(cx, stub, "TypeOfEq");
+
+ jsbytecode* pc = StubOffsetToPc(stub, frame->script());
+ auto operand = TypeofEqOperand::fromRawValue(GET_UINT8(pc));
+ JSType type = operand.type();
+ JSOp compareOp = operand.compareOp();
+
+ TryAttachStub<TypeOfEqIRGenerator>("TypeOfEq", cx, frame, stub, val, type,
+ compareOp);
+
+ bool result = js::TypeOfValue(val) == type;
+ if (compareOp == JSOp::Ne) {
+ result = !result;
+ }
+ res.setBoolean(result);
+ return true;
+}
+
+bool FallbackICCodeCompiler::emit_TypeOfEq() {
+ EmitRestoreTailCallReg(masm);
+
+ masm.pushValue(R0);
+ masm.push(ICStubReg);
+ pushStubPayload(masm, R0.scratchReg());
+
+ using Fn = bool (*)(JSContext*, BaselineFrame*, ICFallbackStub*, HandleValue,
+ MutableHandleValue);
+ return tailCallVM<Fn, DoTypeOfEqFallback>(masm);
+}
+
+//
// ToPropertyKey_Fallback
//
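DoTypeOfEqFallback above decodes one bytecode byte into both a JSType and a comparison op. A minimal sketch of how TypeofEqOperand can pack the two into a single byte follows; the bit assignment is an assumption for illustration, the real layout lives in vm/TypeofEqOperand.h:

    // Hypothetical packing: the JSType sits in the low bits and one flag bit
    // selects JSOp::Ne instead of JSOp::Eq.
    class TypeofEqOperand {
      static constexpr uint8_t NE_BIT = 0x80;  // assumed flag bit
      uint8_t value_;
      explicit TypeofEqOperand(uint8_t value) : value_(value) {}

     public:
      TypeofEqOperand(JSType type, JSOp compareOp)
          : value_(uint8_t(type) | (compareOp == JSOp::Ne ? NE_BIT : 0)) {}
      static TypeofEqOperand fromRawValue(uint8_t value) {
        return TypeofEqOperand(value);
      }
      JSType type() const { return JSType(value_ & ~NE_BIT); }
      JSOp compareOp() const { return (value_ & NE_BIT) ? JSOp::Ne : JSOp::Eq; }
      uint8_t rawValue() const { return value_; }
    };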
diff --git a/js/src/jit/BaselineIC.h b/js/src/jit/BaselineIC.h
index 560955e27a..2b11702494 100644
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -412,6 +412,10 @@ extern bool DoTypeOfFallback(JSContext* cx, BaselineFrame* frame,
ICFallbackStub* stub, HandleValue val,
MutableHandleValue res);
+extern bool DoTypeOfEqFallback(JSContext* cx, BaselineFrame* frame,
+ ICFallbackStub* stub, HandleValue val,
+ MutableHandleValue res);
+
extern bool DoToPropertyKeyFallback(JSContext* cx, BaselineFrame* frame,
ICFallbackStub* stub, HandleValue val,
MutableHandleValue res);
diff --git a/js/src/jit/BaselineICList.h b/js/src/jit/BaselineICList.h
index 7ff39357b5..4856283d20 100644
--- a/js/src/jit/BaselineICList.h
+++ b/js/src/jit/BaselineICList.h
@@ -35,6 +35,7 @@ namespace jit {
_(OptimizeSpreadCall) \
_(InstanceOf) \
_(TypeOf) \
+ _(TypeOfEq) \
_(ToPropertyKey) \
_(Rest) \
_(BinaryArith) \
diff --git a/js/src/jit/BranchHinting.cpp b/js/src/jit/BranchHinting.cpp
new file mode 100644
index 0000000000..01c311267f
--- /dev/null
+++ b/js/src/jit/BranchHinting.cpp
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BranchHinting.h"
+
+#include "jit/IonAnalysis.h"
+#include "jit/JitSpewer.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Implementation of the wasm branch hinting proposal.
+// Some control instructions (if and br_if) can carry a hint, likely or
+// unlikely, meaning the corresponding branch is likely or unlikely to be
+// taken at runtime.
+
+// In a first pass, we tag the basic blocks if we have a hint.
+// In a MIR-to-MIR transformation, we read the hints and act on them:
+// - Unlikely blocks are pushed to the end of the function.
+//   Because of Ion's structure, we don't do that for blocks inside a loop.
+// - TODO: do something for likely blocks.
+// - TODO: the register allocator can be tuned depending on the hints.
+bool jit::BranchHinting(MIRGenerator* mir, MIRGraph& graph) {
+ JitSpew(JitSpew_BranchHint, "Beginning BranchHinting pass");
+
+  // Move all blocks marked as unlikely towards the end.
+ mozilla::Vector<MBasicBlock*, 0> toBeMoved;
+
+ for (MBasicBlock* block : graph) {
+    // If this block ends in a return instruction, it's safe to push it
+    // to the end of the graph.
+    // If the block doesn't contain a return, moving it would create a
+    // backedge outside a loop, which would break ReversePostOrder assertions.
+    // Also avoid moving a block that's in the middle of a loop.
+ if (block->branchHintingUnlikely() && block->loopDepth() == 0 &&
+ block->hasLastIns() && block->lastIns()->is<js::jit::MWasmReturn>()) {
+ if (!toBeMoved.append(block)) {
+ return false;
+ }
+ }
+ }
+
+ for (MBasicBlock* block : toBeMoved) {
+#ifdef JS_JITSPEW
+ JitSpew(JitSpew_BranchHint, "Moving block%u to the end", block->id());
+#endif
+ graph.moveBlockToEnd(block);
+ }
+
+ if (!toBeMoved.empty()) {
+ // Renumber blocks after moving them around.
+ RenumberBlocks(graph);
+ }
+
+ return true;
+}
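For intuition, the effect of the pass can be modeled over a plain vector: stable-partition the blocks so that unlikely, returning, loop-depth-zero blocks end up at the back while everything else keeps its relative order. A toy model (illustrative types, not the MIRGraph API):

    #include <algorithm>
    #include <vector>

    struct Block {
      bool unlikely;      // set from the wasm branch hint
      bool endsInReturn;  // only safe to move when true (no new backedge)
      int loopDepth;      // blocks inside loops are never moved
    };

    void moveUnlikelyBlocksToEnd(std::vector<Block*>& blocks) {
      std::stable_partition(blocks.begin(), blocks.end(), [](const Block* b) {
        return !(b->unlikely && b->endsInReturn && b->loopDepth == 0);
      });
    }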
diff --git a/js/src/jit/BranchHinting.h b/js/src/jit/BranchHinting.h
new file mode 100644
index 0000000000..97665fd2c5
--- /dev/null
+++ b/js/src/jit/BranchHinting.h
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BranchHinting_h
+#define jit_BranchHinting_h
+
+// This file declares the wasm branch hinting optimization pass.
+
+namespace js::jit {
+
+class MIRGenerator;
+class MIRGraph;
+
+[[nodiscard]] bool BranchHinting(MIRGenerator* mir, MIRGraph& graph);
+
+} // namespace js::jit
+
+#endif /* jit_BranchHinting_h */
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
index 03eae14140..4de24905b8 100644
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -49,7 +49,8 @@
#include "vm/ProxyObject.h"
#include "vm/RegExpObject.h"
#include "vm/SelfHosting.h"
-#include "vm/ThrowMsgKind.h" // ThrowCondition
+#include "vm/ThrowMsgKind.h" // ThrowCondition
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "vm/Watchtower.h"
#include "wasm/WasmInstance.h"
@@ -109,6 +110,7 @@ size_t js::jit::NumInputsForCacheKind(CacheKind kind) {
return 0;
case CacheKind::GetProp:
case CacheKind::TypeOf:
+ case CacheKind::TypeOfEq:
case CacheKind::ToPropertyKey:
case CacheKind::GetIterator:
case CacheKind::ToBool:
@@ -3170,6 +3172,11 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
auto* tarr = &obj->as<TypedArrayObject>();
+ if (tarr->type() == Scalar::Float16) {
+ // TODO: See Bug 1835034 for JIT support for Float16Array.
+ return AttachDecision::NoAction;
+ }
+
bool handleOOB = false;
int64_t indexInt64;
if (!ValueIsInt64Index(idVal_, &indexInt64) || indexInt64 < 0 ||
@@ -4530,6 +4537,7 @@ OperandId IRGenerator::emitNumericGuard(ValOperandId valId, const Value& v,
return writer.truncateDoubleToUInt32(numId);
}
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64: {
if (v.isNumber()) {
@@ -5061,6 +5069,11 @@ AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
auto* tarr = &obj->as<TypedArrayObject>();
Scalar::Type elementType = tarr->type();
+ if (elementType == Scalar::Float16) {
+ // TODO: See Bug 1835034 for JIT support for Float16Array.
+ return AttachDecision::NoAction;
+ }
+
// Don't attach if the input type doesn't match the guard added below.
if (!ValueCanConvertToNumeric(elementType, rhsVal_)) {
return AttachDecision::NoAction;
@@ -5800,6 +5813,77 @@ AttachDecision TypeOfIRGenerator::tryAttachObject(ValOperandId valId) {
return AttachDecision::Attach;
}
+TypeOfEqIRGenerator::TypeOfEqIRGenerator(JSContext* cx, HandleScript script,
+ jsbytecode* pc, ICState state,
+ HandleValue value, JSType type,
+ JSOp compareOp)
+ : IRGenerator(cx, script, pc, CacheKind::TypeOfEq, state),
+ val_(value),
+ type_(type),
+ compareOp_(compareOp) {}
+
+void TypeOfEqIRGenerator::trackAttached(const char* name) {
+ stubName_ = name ? name : "NotAttached";
+#ifdef JS_CACHEIR_SPEW
+ if (const CacheIRSpewer::Guard& sp = CacheIRSpewer::Guard(*this, name)) {
+ sp.valueProperty("val", val_);
+ sp.jstypeProperty("type", type_);
+ sp.opcodeProperty("compareOp", compareOp_);
+ }
+#endif
+}
+
+AttachDecision TypeOfEqIRGenerator::tryAttachStub() {
+ MOZ_ASSERT(cacheKind_ == CacheKind::TypeOfEq);
+
+ AutoAssertNoPendingException aanpe(cx_);
+
+ ValOperandId valId(writer.setInputOperandId(0));
+
+ TRY_ATTACH(tryAttachPrimitive(valId));
+ TRY_ATTACH(tryAttachObject(valId));
+
+ MOZ_ASSERT_UNREACHABLE("Failed to attach TypeOfEq");
+ return AttachDecision::NoAction;
+}
+
+AttachDecision TypeOfEqIRGenerator::tryAttachPrimitive(ValOperandId valId) {
+ if (!val_.isPrimitive()) {
+ return AttachDecision::NoAction;
+ }
+
+ // Note: we don't use GuardIsNumber for int32 values because it's less
+ // efficient in Warp (unboxing to double instead of int32).
+ if (val_.isDouble()) {
+ writer.guardIsNumber(valId);
+ } else {
+ writer.guardNonDoubleType(valId, val_.type());
+ }
+
+ bool result = js::TypeOfValue(val_) == type_;
+ if (compareOp_ == JSOp::Ne) {
+ result = !result;
+ }
+ writer.loadBooleanResult(result);
+ writer.returnFromIC();
+ writer.setTypeData(TypeData(JSValueType(val_.type())));
+ trackAttached("TypeOfEq.Primitive");
+ return AttachDecision::Attach;
+}
+
+AttachDecision TypeOfEqIRGenerator::tryAttachObject(ValOperandId valId) {
+ if (!val_.isObject()) {
+ return AttachDecision::NoAction;
+ }
+
+ ObjOperandId objId = writer.guardToObject(valId);
+ writer.loadTypeOfEqObjectResult(objId, TypeofEqOperand(type_, compareOp_));
+ writer.returnFromIC();
+ writer.setTypeData(TypeData(JSValueType(val_.type())));
+ trackAttached("TypeOfEq.Object");
+ return AttachDecision::Attach;
+}
+
GetIteratorIRGenerator::GetIteratorIRGenerator(JSContext* cx,
HandleScript script,
jsbytecode* pc, ICState state,
@@ -9127,6 +9211,7 @@ static bool AtomicsMeetsPreconditions(TypedArrayObject* typedArray,
case Scalar::BigUint64:
break;
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
case Scalar::Uint8Clamped:
@@ -10308,7 +10393,7 @@ AttachDecision CallIRGenerator::tryAttachFunCall(HandleFunction callee) {
writer.guardNotClassConstructor(thisObjId);
if (isScripted) {
- writer.guardFunctionHasJitEntry(thisObjId, /*isConstructing =*/false);
+ writer.guardFunctionHasJitEntry(thisObjId);
writer.callScriptedFunction(thisObjId, argcId, targetFlags,
ClampFixedArgc(argc_));
} else {
@@ -11268,7 +11353,7 @@ AttachDecision CallIRGenerator::tryAttachFunApply(HandleFunction calleeFunc) {
if (isScripted) {
// Guard that function is scripted.
- writer.guardFunctionHasJitEntry(thisObjId, /*constructing =*/false);
+ writer.guardFunctionHasJitEntry(thisObjId);
writer.callScriptedFunction(thisObjId, argcId, targetFlags, fixedArgc);
} else {
// Guard that function is native.
@@ -12034,7 +12119,7 @@ void CallIRGenerator::emitCallScriptedGuards(ObjOperandId calleeObjId,
} else {
// Guard that object is a scripted function
writer.guardClass(calleeObjId, GuardClassKind::JSFunction);
- writer.guardFunctionHasJitEntry(calleeObjId, isConstructing);
+ writer.guardFunctionHasJitEntry(calleeObjId);
if (isConstructing) {
// If callee is not a constructor, we have to throw.
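In the primitive path above, the stub's answer is a compile-time constant: the type guard pins the operand's type, so tryAttachPrimitive can bake the boolean in. A hedged rendering of the CacheIR sequence attached for, say, typeof 3.14 != "number" (op names taken from the writer calls above):

    GuardIsNumber v0          // from writer.guardIsNumber(valId)
    LoadBooleanResult false   // TypeOfValue(3.14) == JSTYPE_NUMBER, negated for Ne
    ReturnFromIC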
diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
index 132070d535..b33a5117c3 100644
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -182,6 +182,7 @@ class TypedOperandId : public OperandId {
_(HasOwn) \
_(CheckPrivateField) \
_(TypeOf) \
+ _(TypeOfEq) \
_(ToPropertyKey) \
_(InstanceOf) \
_(GetIterator) \
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
index 9a26b0816c..ceb14f0ecb 100644
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -44,6 +44,7 @@
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "vm/Uint8Clamped.h"
#include "builtin/Boolean-inl.h"
@@ -1055,6 +1056,15 @@ void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
*addr = newWord;
}
+void CacheIRStubInfo::replaceStubRawValueBits(uint8_t* stubData,
+ uint32_t offset, uint64_t oldBits,
+ uint64_t newBits) const {
+ MOZ_ASSERT(uint64_t(stubData + offset) % sizeof(uint64_t) == 0);
+ uint64_t* addr = reinterpret_cast<uint64_t*>(stubData + offset);
+ MOZ_ASSERT(*addr == oldBits);
+ *addr = newBits;
+}
+
template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
Stub* stub, uint32_t offset) const {
@@ -2838,7 +2848,7 @@ bool CacheIRCompiler::emitStringToAtom(StringOperandId stringId) {
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &done);
- masm.lookupStringInAtomCacheLastLookups(str, scratch, str, &vmCall);
+ masm.tryFastAtomize(str, scratch, str, &vmCall);
masm.jump(&done);
masm.bind(&vmCall);
@@ -4670,8 +4680,7 @@ bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
return true;
}
-bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
- bool constructing) {
+bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register fun = allocator.useRegister(masm, funId);
@@ -4680,7 +4689,7 @@ bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
return false;
}
- masm.branchIfFunctionHasNoJitEntry(fun, constructing, failure->label());
+ masm.branchIfFunctionHasNoJitEntry(fun, failure->label());
return true;
}
@@ -4694,8 +4703,7 @@ bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
return false;
}
- masm.branchIfFunctionHasJitEntry(obj, /*isConstructing =*/false,
- failure->label());
+ masm.branchIfFunctionHasJitEntry(obj, failure->label());
return true;
}
@@ -6565,6 +6573,7 @@ bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
break;
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
@@ -7057,6 +7066,7 @@ bool CacheIRCompiler::emitStoreDataViewValueResult(
valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
break;
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
@@ -7400,6 +7410,69 @@ bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitLoadTypeOfEqObjectResult(ObjOperandId objId,
+ TypeofEqOperand operand) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ JSType type = operand.type();
+ JSOp compareOp = operand.compareOp();
+ bool result;
+
+ Label slowCheck, isObject, isCallable, isUndefined, done;
+ masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
+ &isUndefined);
+
+ masm.bind(&isCallable);
+ result = type == JSTYPE_FUNCTION;
+ if (compareOp == JSOp::Ne) {
+ result = !result;
+ }
+ masm.moveValue(BooleanValue(result), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&isUndefined);
+ result = type == JSTYPE_UNDEFINED;
+ if (compareOp == JSOp::Ne) {
+ result = !result;
+ }
+ masm.moveValue(BooleanValue(result), output.valueReg());
+ masm.jump(&done);
+
+ masm.bind(&isObject);
+ result = type == JSTYPE_OBJECT;
+ if (compareOp == JSOp::Ne) {
+ result = !result;
+ }
+ masm.moveValue(BooleanValue(result), output.valueReg());
+ masm.jump(&done);
+
+ {
+ masm.bind(&slowCheck);
+ LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+ liveVolatileFloatRegs());
+ save.takeUnchecked(output.valueReg());
+ save.takeUnchecked(scratch);
+ masm.PushRegsInMask(save);
+
+ using Fn = bool (*)(JSObject* obj, TypeofEqOperand operand);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(obj);
+ masm.move32(Imm32(TypeofEqOperand(type, compareOp).rawValue()), scratch);
+ masm.passABIArg(scratch);
+ masm.callWithABI<Fn, TypeOfEqObject>();
+ masm.storeCallBoolResult(scratch);
+
+ masm.PopRegsInMask(save);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
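The slow path above calls TypeOfEqObject through the ABI. Judging from the fallback logic in BaselineIC.cpp, a plausible shape for that helper is the following sketch (the real definition lives elsewhere in the jit sources):

    // Sketch: mirrors DoTypeOfEqFallback's computation for objects.
    bool TypeOfEqObject(JSObject* obj, TypeofEqOperand operand) {
      bool result = js::TypeOfObject(obj) == operand.type();
      if (operand.compareOp() == JSOp::Ne) {
        result = !result;
      }
      return result;
    }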
diff --git a/js/src/jit/CacheIRCompiler.h b/js/src/jit/CacheIRCompiler.h
index 69b1dd34ac..67e90419a2 100644
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -1420,6 +1420,9 @@ class CacheIRStubInfo {
void replaceStubRawWord(uint8_t* stubData, uint32_t offset, uintptr_t oldWord,
uintptr_t newWord) const;
+
+ void replaceStubRawValueBits(uint8_t* stubData, uint32_t offset,
+ uint64_t oldBits, uint64_t newBits) const;
};
template <typename T>
diff --git a/js/src/jit/CacheIRGenerator.h b/js/src/jit/CacheIRGenerator.h
index 2e15b2d8a6..778a49071e 100644
--- a/js/src/jit/CacheIRGenerator.h
+++ b/js/src/jit/CacheIRGenerator.h
@@ -460,6 +460,23 @@ class MOZ_RAII TypeOfIRGenerator : public IRGenerator {
AttachDecision tryAttachStub();
};
+class MOZ_RAII TypeOfEqIRGenerator : public IRGenerator {
+ HandleValue val_;
+ JSType type_;
+ JSOp compareOp_;
+
+ AttachDecision tryAttachPrimitive(ValOperandId valId);
+ AttachDecision tryAttachObject(ValOperandId valId);
+ void trackAttached(const char* name /* must be a C string literal */);
+
+ public:
+ TypeOfEqIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc,
+ ICState state, HandleValue value, JSType type,
+ JSOp compareOp);
+
+ AttachDecision tryAttachStub();
+};
+
class MOZ_RAII GetIteratorIRGenerator : public IRGenerator {
HandleValue val_;
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
index 2f3097dfd8..d5d34b4a6a 100644
--- a/js/src/jit/CacheIROps.yaml
+++ b/js/src/jit/CacheIROps.yaml
@@ -750,7 +750,6 @@
cost_estimate: 1
args:
fun: ObjId
- constructing: BoolImm
- name: GuardFunctionHasNoJitEntry
shared: true
@@ -2621,6 +2620,14 @@
args:
obj: ObjId
+- name: LoadTypeOfEqObjectResult
+ shared: true
+ transpile: true
+ cost_estimate: 4
+ args:
+ obj: ObjId
+ operand: TypeofEqOperandImm
+
- name: DoubleAddResult
shared: true
transpile: true
diff --git a/js/src/jit/CacheIRReader.h b/js/src/jit/CacheIRReader.h
index 59483424a3..a27fd3bb9a 100644
--- a/js/src/jit/CacheIRReader.h
+++ b/js/src/jit/CacheIRReader.h
@@ -18,6 +18,7 @@
#include "jit/CompactBuffer.h"
#include "js/ScalarType.h"
#include "js/Value.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "wasm/WasmValType.h"
enum class JSOp : uint8_t;
@@ -112,6 +113,9 @@ class MOZ_RAII CacheIRReader {
Scalar::Type scalarType() { return Scalar::Type(buffer_.readByte()); }
JSWhyMagic whyMagic() { return JSWhyMagic(buffer_.readByte()); }
JSOp jsop() { return JSOp(buffer_.readByte()); }
+ TypeofEqOperand typeofEqOperand() {
+ return TypeofEqOperand::fromRawValue(buffer_.readByte());
+ }
int32_t int32Immediate() { return int32_t(buffer_.readFixedUint32_t()); }
uint32_t uint32Immediate() { return buffer_.readFixedUint32_t(); }
void* pointer() { return buffer_.readRawPointer(); }
diff --git a/js/src/jit/CacheIRSpewer.cpp b/js/src/jit/CacheIRSpewer.cpp
index 613e0f7d85..fbc8a50b04 100644
--- a/js/src/jit/CacheIRSpewer.cpp
+++ b/js/src/jit/CacheIRSpewer.cpp
@@ -71,6 +71,10 @@ class MOZ_RAII CacheIROpsJitSpewer {
void spewJSOpImm(const char* name, JSOp op) {
out_.printf("%s JSOp::%s", name, CodeName(op));
}
+ void spewTypeofEqOperandImm(const char* name, TypeofEqOperand operand) {
+ out_.printf("%s %s %s", name, JSTypeToString(operand.type()),
+ CodeName(operand.compareOp()));
+ }
void spewStaticStringImm(const char* name, const char* str) {
out_.printf("%s \"%s\"", name, str);
}
@@ -223,6 +227,9 @@ class MOZ_RAII CacheIROpsJSONSpewer {
void spewJSOpImm(const char* name, JSOp op) {
spewArgImpl(name, "JSOp", CodeName(op));
}
+ void spewTypeofEqOperandImm(const char* name, TypeofEqOperand operand) {
+ spewArgImpl(name, "TypeofEqOperand", uint8_t(operand.rawValue()));
+ }
void spewStaticStringImm(const char* name, const char* str) {
spewArgImpl(name, "String", str);
}
@@ -430,6 +437,15 @@ void CacheIRSpewer::opcodeProperty(const char* name, const JSOp op) {
j.endStringProperty();
}
+void CacheIRSpewer::jstypeProperty(const char* name, const JSType type) {
+ MOZ_ASSERT(enabled());
+ JSONPrinter& j = json_.ref();
+
+ j.beginStringProperty(name);
+ output_.put(JSTypeToString(type));
+ j.endStringProperty();
+}
+
void CacheIRSpewer::cacheIRSequence(CacheIRReader& reader) {
MOZ_ASSERT(enabled());
JSONPrinter& j = json_.ref();
diff --git a/js/src/jit/CacheIRSpewer.h b/js/src/jit/CacheIRSpewer.h
index fba33ba990..6ceb1bb106 100644
--- a/js/src/jit/CacheIRSpewer.h
+++ b/js/src/jit/CacheIRSpewer.h
@@ -53,6 +53,7 @@ class CacheIRSpewer {
void beginCache(const IRGenerator& generator);
void valueProperty(const char* name, const Value& v);
void opcodeProperty(const char* name, const JSOp op);
+ void jstypeProperty(const char* name, const JSType type);
void cacheIRSequence(CacheIRReader& reader);
void attached(const char* name);
void endCache();
@@ -101,6 +102,10 @@ class CacheIRSpewer {
sp_.opcodeProperty(name, op);
}
+ void jstypeProperty(const char* name, const JSType type) const {
+ sp_.jstypeProperty(name, type);
+ }
+
explicit operator bool() const { return sp_.enabled(); }
};
};
diff --git a/js/src/jit/CacheIRWriter.h b/js/src/jit/CacheIRWriter.h
index 6a32885d7c..d888012134 100644
--- a/js/src/jit/CacheIRWriter.h
+++ b/js/src/jit/CacheIRWriter.h
@@ -43,6 +43,7 @@
#include "vm/Opcodes.h"
#include "vm/RealmFuses.h"
#include "vm/Shape.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "wasm/WasmConstants.h"
#include "wasm/WasmValType.h"
@@ -257,6 +258,9 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter {
static_assert(sizeof(JSOp) == sizeof(uint8_t), "JSOp must fit in a byte");
buffer_.writeByte(uint8_t(op));
}
+ void writeTypeofEqOperandImm(TypeofEqOperand operand) {
+ buffer_.writeByte(operand.rawValue());
+ }
void writeGuardClassKindImm(GuardClassKind kind) {
static_assert(sizeof(GuardClassKind) == sizeof(uint8_t),
"GuardClassKind must fit in a byte");
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 559ac50cc7..ef4cd5d851 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -77,6 +77,7 @@
#include "vm/StringType.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmPI.h"
#include "wasm/WasmValType.h"
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
@@ -972,6 +973,7 @@ void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
}
case CacheKind::Call:
case CacheKind::TypeOf:
+ case CacheKind::TypeOfEq:
case CacheKind::ToBool:
case CacheKind::GetIntrinsic:
case CacheKind::NewArray:
@@ -2328,18 +2330,37 @@ void CreateDependentString::generate(MacroAssembler& masm,
masm.addToCharPtr(temp1_, temp2_, encoding_);
masm.storeNonInlineStringChars(temp1_, string_);
masm.storeDependentStringBase(base, string_);
- masm.movePtr(base, temp1_);
+
+  // Ensure that the depended-on string is flagged as such, so we don't
+  // convert it into a forwarded atom.
+ masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
+ Label skipDependedOn;
+ masm.branchTest32(Assembler::NonZero, temp2_, Imm32(JSString::ATOM_BIT),
+ &skipDependedOn);
+ masm.or32(Imm32(JSString::DEPENDED_ON_BIT), temp2_);
+ masm.store32(temp2_, Address(base, JSString::offsetOfFlags()));
+ masm.bind(&skipDependedOn);
// Follow any base pointer if the input is itself a dependent string.
// Watch for undepended strings, which have a base pointer but don't
// actually share their characters with it.
Label noBase;
- masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
+ masm.movePtr(base, temp1_);
masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
&noBase);
masm.loadDependentStringBase(base, temp1_);
masm.storeDependentStringBase(temp1_, string_);
+#ifdef DEBUG
+ Label isAppropriatelyMarked;
+ masm.branchTest32(Assembler::NonZero,
+ Address(temp1_, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT | JSString::DEPENDED_ON_BIT),
+ &isAppropriatelyMarked);
+ masm.assumeUnreachable("Base chain missing DEPENDED_ON_BIT");
+ masm.bind(&isAppropriatelyMarked);
+#endif
+
masm.bind(&noBase);
// Post-barrier the base store, whether it was the direct or indirect
@@ -5495,21 +5516,10 @@ void CodeGenerator::visitAssertCanElidePostWriteBarrier(
}
template <typename LCallIns>
-void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
- MCallBase* mir = call->mir();
-
- uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());
-
- // Registers used for callWithABI() argument-passing.
- const Register argContextReg = ToRegister(call->getArgContextReg());
- const Register argUintNReg = ToRegister(call->getArgUintNReg());
- const Register argVpReg = ToRegister(call->getArgVpReg());
-
- // Misc. temporary registers.
- const Register tempReg = ToRegister(call->getTempReg());
-
- DebugOnly<uint32_t> initialStack = masm.framePushed();
-
+void CodeGenerator::emitCallNative(LCallIns* call, JSNative native,
+ Register argContextReg, Register argUintNReg,
+ Register argVpReg, Register tempReg,
+ uint32_t unusedStack) {
masm.checkStackAlignment();
// Native functions have the signature:
@@ -5524,17 +5534,21 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Push a Value containing the callee object: natives are allowed to access
// their callee before setting the return value. The StackPointer is moved
// to &vp[0].
+ //
+ // Also reserves the space for |NativeExitFrameLayout::{lo,hi}CalleeResult_|.
if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
Register calleeReg = ToRegister(call->getCallee());
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
+ // Enter the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.switchToObjectRealm(calleeReg, tempReg);
}
} else {
- WrappedFunction* target = call->getSingleTarget();
+ WrappedFunction* target = call->mir()->getSingleTarget();
masm.Push(ObjectValue(*target->rawNativeJSFunction()));
+ // Enter the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
masm.switchToObjectRealm(tempReg, tempReg);
@@ -5543,12 +5557,17 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Preload arguments into registers.
masm.loadJSContext(argContextReg);
- masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
masm.moveStackPtrTo(argVpReg);
+ // Initialize |NativeExitFrameLayout::argc_|.
masm.Push(argUintNReg);
// Construct native exit frame.
+ //
+ // |buildFakeExitFrame| initializes |NativeExitFrameLayout::exit_| and
+ // |enterFakeExitFrameForNative| initializes |NativeExitFrameLayout::footer_|.
+ //
+ // The NativeExitFrameLayout is now fully initialized.
uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
masm.enterFakeExitFrameForNative(argContextReg, tempReg,
call->mir()->isConstructing());
@@ -5581,6 +5600,7 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Test for failure.
masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
+ // Exit the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
}
@@ -5593,10 +5613,44 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Until C++ code is instrumented against Spectre, prevent speculative
// execution from returning any private data.
if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
- mir->hasLiveDefUses()) {
+ call->mir()->hasLiveDefUses()) {
masm.speculationBarrier();
}
+#ifdef DEBUG
+ // Native constructors are guaranteed to return an Object value.
+ if (call->mir()->isConstructing()) {
+ Label notPrimitive;
+ masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
+ &notPrimitive);
+ masm.assumeUnreachable("native constructors don't return primitives");
+ masm.bind(&notPrimitive);
+ }
+#endif
+}
+
+template <typename LCallIns>
+void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
+ uint32_t unusedStack =
+ UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
+
+ // Registers used for callWithABI() argument-passing.
+ const Register argContextReg = ToRegister(call->getArgContextReg());
+ const Register argUintNReg = ToRegister(call->getArgUintNReg());
+ const Register argVpReg = ToRegister(call->getArgVpReg());
+
+ // Misc. temporary registers.
+ const Register tempReg = ToRegister(call->getTempReg());
+
+ DebugOnly<uint32_t> initialStack = masm.framePushed();
+
+ // Initialize the argc register.
+ masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
+
+ // Create the exit frame and call the native.
+ emitCallNative(call, native, argContextReg, argUintNReg, argVpReg, tempReg,
+ unusedStack);
+
// The next instruction is removing the footer of the exit frame, so there
// is no need for leaveFakeExitFrame.
@@ -5962,7 +6016,7 @@ void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
masm.switchToObjectRealm(calleeReg, scratch);
// Load jitCodeRaw for callee if it exists.
- masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
+ masm.branchIfFunctionHasNoJitEntry(calleeReg, &noJitEntry);
// ****************************
// * Functions with jit entry *
@@ -6736,7 +6790,7 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
}
// Guard that calleereg is an interpreted function with a JSScript.
- masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
+ masm.branchIfFunctionHasNoJitEntry(calleereg, &invoke);
// Guard that callee allows the [[Call]] or [[Construct]] operation required.
if (constructing) {
@@ -6841,15 +6895,35 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
}
template <typename T>
-void CodeGenerator::emitCallInvokeNativeFunction(T* apply) {
- pushArg(masm.getStackPointer()); // argv.
- pushArg(ToRegister(apply->getArgc())); // argc.
- pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
- pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
+void CodeGenerator::emitAlignStackForApplyNative(T* apply, Register argc) {
+ static_assert(JitStackAlignment % ABIStackAlignment == 0,
+ "aligning on JIT stack subsumes ABI alignment");
- using Fn =
- bool (*)(JSContext*, bool, bool, uint32_t, Value*, MutableHandleValue);
- callVM<Fn, jit::InvokeNativeFunction>(apply);
+ // Align the arguments on the JitStackAlignment.
+ if (JitStackValueAlignment > 1) {
+ MOZ_ASSERT(JitStackValueAlignment == 2,
+ "Stack padding adds exactly one Value");
+ MOZ_ASSERT(frameSize() % JitStackValueAlignment == 0,
+ "Stack padding assumes that the frameSize is correct");
+
+ Assembler::Condition cond;
+ if constexpr (T::isConstructing()) {
+      // If the number of arguments is even, then we do not need any padding.
+      //
+      // Also see emitAllocateSpaceForConstructAndPushNewTarget().
+      cond = Assembler::Zero;
+    } else {
+      // If the number of arguments is odd, then we do not need any padding.
+      //
+      // Also see emitAllocateSpaceForApply().
+ cond = Assembler::NonZero;
+ }
+
+ Label noPaddingNeeded;
+ masm.branchTestPtr(cond, argc, Imm32(1), &noPaddingNeeded);
+ masm.pushValue(MagicValue(JS_ARG_POISON));
+ masm.bind(&noPaddingNeeded);
+ }
}
template <typename T>
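The parity test above is easiest to check with a small worked example, assuming the aligned region holds the argc arguments, |this|, and, when constructing, newTarget, each one Value wide:

    // Sketch of the padding decision with JitStackValueAlignment == 2.
    static bool needsPadding(uint32_t argc, bool constructing) {
      uint32_t valueSlots = argc + 1 + (constructing ? 1 : 0);  // args + this (+ newTarget)
      return valueSlots % 2 != 0;
    }
    // needsPadding(2, false) == true   -> a poison Value is pushed
    // needsPadding(3, false) == false  -> NonZero test on (argc & 1) skips it
    // needsPadding(2, true)  == false  -> Zero test on (argc & 1) skips it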
@@ -6859,11 +6933,19 @@ void CodeGenerator::emitPushNativeArguments(T* apply) {
Register scratch = ToRegister(apply->getTempForArgCopy());
uint32_t extraFormals = apply->numExtraFormals();
+ // Align stack.
+ emitAlignStackForApplyNative(apply, argc);
+
+ // Push newTarget.
+ if constexpr (T::isConstructing()) {
+ masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
+ }
+
// Push arguments.
Label noCopy;
masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
{
- // Use scratch register to calculate stack space (no padding needed).
+ // Use scratch register to calculate stack space.
masm.movePtr(argc, scratch);
// Reserve space for copying the arguments.
@@ -6885,6 +6967,13 @@ void CodeGenerator::emitPushNativeArguments(T* apply) {
argvDstOffset);
}
masm.bind(&noCopy);
+
+ // Push |this|.
+ if constexpr (T::isConstructing()) {
+ masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
+ } else {
+ masm.pushValue(ToValue(apply, T::ThisIndex));
+ }
}
template <typename T>
@@ -6904,6 +6993,14 @@ void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
// The array length is our argc.
masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
+ // Align stack.
+ emitAlignStackForApplyNative(apply, tmpArgc);
+
+ // Push newTarget.
+ if constexpr (T::isConstructing()) {
+ masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
+ }
+
// Skip the copy of arguments if there are none.
Label noCopy;
masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
@@ -6919,8 +7016,15 @@ void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
}
masm.bind(&noCopy);
- // Set argc in preparation for emitCallInvokeNativeFunction.
+ // Set argc in preparation for calling the native function.
masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);
+
+ // Push |this|.
+ if constexpr (T::isConstructing()) {
+ masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
+ } else {
+ masm.pushValue(ToValue(apply, T::ThisIndex));
+ }
}
void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
@@ -6944,6 +7048,7 @@ void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
Register argsObj = ToRegister(apply->getArgsObj());
Register tmpArgc = ToRegister(apply->getTempObject());
Register scratch = ToRegister(apply->getTempForArgCopy());
+ Register scratch2 = ToRegister(apply->getTempExtra());
// NB: argc and argsObj are mapped to the same register.
MOZ_ASSERT(argc == argsObj);
@@ -6951,11 +7056,14 @@ void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
// Load argc into tmpArgc.
masm.loadArgumentsObjectLength(argsObj, tmpArgc);
+ // Align stack.
+ emitAlignStackForApplyNative(apply, tmpArgc);
+
// Push arguments.
Label noCopy, epilogue;
masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
{
- // Use scratch register to calculate stack space (no padding needed).
+ // Use scratch register to calculate stack space.
masm.movePtr(tmpArgc, scratch);
// Reserve space for copying the arguments.
@@ -6970,56 +7078,65 @@ void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
size_t argvDstOffset = 0;
- // Stash away |tmpArgc| and adjust argvDstOffset accordingly.
- masm.push(tmpArgc);
- argvDstOffset += sizeof(void*);
+ Register argvIndex = scratch2;
+ masm.move32(tmpArgc, argvIndex);
// Copy the values.
- emitCopyValuesForApply(argvSrcBase, tmpArgc, scratch, argvSrcOffset,
+ emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
argvDstOffset);
-
- // Set argc in preparation for emitCallInvokeNativeFunction.
- masm.pop(argc);
- masm.jump(&epilogue);
}
masm.bind(&noCopy);
- {
- // Set argc in preparation for emitCallInvokeNativeFunction.
- masm.movePtr(ImmWord(0), argc);
- }
- masm.bind(&epilogue);
+
+ // Set argc in preparation for calling the native function.
+ masm.movePtr(tmpArgc, argc);
+
+ // Push |this|.
+ masm.pushValue(ToValue(apply, LApplyArgsObjNative::ThisIndex));
}
template <typename T>
void CodeGenerator::emitApplyNative(T* apply) {
- MOZ_ASSERT(apply->mir()->getSingleTarget()->isNativeWithoutJitEntry());
-
- constexpr bool isConstructing = T::isConstructing();
- MOZ_ASSERT(isConstructing == apply->mir()->isConstructing(),
+ MOZ_ASSERT(T::isConstructing() == apply->mir()->isConstructing(),
"isConstructing condition must be consistent");
- // Push newTarget.
- if constexpr (isConstructing) {
- masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
+ WrappedFunction* target = apply->mir()->getSingleTarget();
+ MOZ_ASSERT(target->isNativeWithoutJitEntry());
+
+ JSNative native = target->native();
+ if (apply->mir()->ignoresReturnValue() && target->hasJitInfo()) {
+ const JSJitInfo* jitInfo = target->jitInfo();
+ if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
+ native = jitInfo->ignoresReturnValueMethod;
+ }
}
- // Push arguments.
+ // Push arguments, including newTarget and |this|.
emitPushArguments(apply);
- // Push |this|.
- if constexpr (isConstructing) {
- masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
- } else {
- masm.pushValue(ToValue(apply, T::ThisIndex));
- }
+ // Registers used for callWithABI() argument-passing.
+ Register argContextReg = ToRegister(apply->getTempObject());
+ Register argUintNReg = ToRegister(apply->getArgc());
+ Register argVpReg = ToRegister(apply->getTempForArgCopy());
+ Register tempReg = ToRegister(apply->getTempExtra());
+
+ // No unused stack for variadic calls.
+ uint32_t unusedStack = 0;
+
+  // Pushing the arguments doesn't change the tracked framePushed() amount.
+ MOZ_ASSERT(masm.framePushed() == frameSize());
- // Push callee.
- masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getFunction()));
+ // Create the exit frame and call the native.
+ emitCallNative(apply, native, argContextReg, argUintNReg, argVpReg, tempReg,
+ unusedStack);
- // Call the native function.
- emitCallInvokeNativeFunction(apply);
+ // The exit frame is still on the stack.
+ MOZ_ASSERT(masm.framePushed() == frameSize() + NativeExitFrameLayout::Size());
+
+ // The next instruction is removing the exit frame, so there is no need for
+ // leaveFakeExitFrame.
// Pop arguments and continue.
+ masm.setFramePushed(frameSize());
emitRestoreStackPointerFromFP();
}
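After emitPushArguments and the callee/argc pushes inside emitCallNative, the native sees a conventional vp array. A sketch of the layout implied by the push order above (ascending addresses from vp; hedged reading of the code, not a quoted layout):

    // vp[0]            callee Value (pushed by emitCallNative)
    // vp[1]            |this|, or the JS_IS_CONSTRUCTING magic Value
    // vp[2..argc+1]    the arguments
    // vp[argc+2]       newTarget (constructing calls only)
    // argc itself is pushed below vp[0] to fill NativeExitFrameLayout::argc_.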
@@ -9303,6 +9420,475 @@ void CodeGenerator::visitWasmCall(LWasmCall* lir) {
}
}
+#ifdef ENABLE_WASM_JSPI
+void CodeGenerator::callWasmUpdateSuspenderState(
+ wasm::UpdateSuspenderStateAction kind, Register suspender) {
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ masm.move32(Imm32(uint32_t(kind)), ScratchReg);
+
+ masm.setupWasmABICall();
+ masm.passABIArg(InstanceReg);
+ masm.passABIArg(suspender);
+ masm.passABIArg(ScratchReg);
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(wasm::BytecodeOffset(0),
+ wasm::SymbolicAddress::UpdateSuspenderState,
+ mozilla::Some(instanceOffset));
+
+ masm.Pop(InstanceReg);
+}
+#endif // ENABLE_WASM_JSPI
+
+void CodeGenerator::visitWasmStackSwitchToSuspendable(
+ LWasmStackSwitchToSuspendable* lir) {
+#ifdef ENABLE_WASM_JSPI
+ const Register SuspenderReg = lir->suspender()->toRegister().gpr();
+ const Register FnReg = lir->fn()->toRegister().gpr();
+ const Register DataReg = lir->data()->toRegister().gpr();
+ const Register SuspenderDataReg = ABINonArgReg3;
+
+# ifdef JS_CODEGEN_ARM64
+ vixl::UseScratchRegisterScope temps(&masm);
+ const Register ScratchReg = temps.AcquireX().asUnsized();
+# elif !defined(JS_CODEGEN_X64)
+# error "NYI: scratch register"
+# endif
+
+ masm.Push(SuspenderReg);
+ masm.Push(FnReg);
+ masm.Push(DataReg);
+
+ callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Enter,
+ SuspenderReg);
+ masm.Pop(DataReg);
+ masm.Pop(FnReg);
+ masm.Pop(SuspenderReg);
+
+ masm.Push(SuspenderReg);
+ int32_t framePushedAtSuspender = masm.framePushed();
+ masm.Push(InstanceReg);
+
+ wasm::CallSiteDesc desc(wasm::CallSiteDesc::Kind::StackSwitch);
+ CodeLabel returnCallsite;
+
+ // Aligning stack before trampoline call.
+ uint32_t reserve = ComputeByteAlignment(
+ masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
+ masm.reserveStack(reserve);
+
+ masm.loadPrivate(Address(SuspenderReg, NativeObject::getFixedSlotOffset(
+ wasm::SuspenderObjectDataSlot)),
+ SuspenderDataReg);
+
+  // Switch stacks to suspendable, keeping the original FP to maintain the
+  // frame chain between the main and suspendable stack segments.
+ masm.storeStackPtr(
+ Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainSP()));
+ masm.storePtr(
+ FramePointer,
+ Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainFP()));
+
+ masm.loadStackPtr(Address(
+ SuspenderDataReg, wasm::SuspenderObjectData::offsetOfSuspendableSP()));
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ // The FramePointer is not changed for SwitchToSuspendable.
+ uint32_t framePushed = masm.framePushed();
+
+ // On different stack, reset framePushed. FramePointer is not valid here.
+ masm.setFramePushed(0);
+
+ // Pass the suspender and data params through the wasm function ABI registers.
+ WasmABIArgGenerator abi;
+ ABIArg arg;
+ arg = abi.next(MIRType::Pointer);
+ MOZ_RELEASE_ASSERT(arg.kind() == ABIArg::GPR);
+ masm.movePtr(SuspenderReg, arg.gpr());
+ arg = abi.next(MIRType::Pointer);
+ MOZ_RELEASE_ASSERT(arg.kind() == ABIArg::GPR);
+ masm.movePtr(DataReg, arg.gpr());
+ unsigned reserveBeforeCall = abi.stackBytesConsumedSoFar();
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ unsigned argDecrement =
+ StackDecrementForCall(WasmStackAlignment, 0, reserveBeforeCall);
+ masm.reserveStack(argDecrement);
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCallerInstanceOffsetBeforeCall));
+ // Get wasm instance pointer for callee.
+ size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_INSTANCE_SLOT);
+ masm.loadPtr(Address(FnReg, instanceSlotOffset), InstanceReg);
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCalleeInstanceOffsetBeforeCall));
+ masm.loadWasmPinnedRegsFromInstance();
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ const Register ReturnAddressReg = ScratchReg;
+
+  // Save the future suspendable stack exit frame pointer.
+ masm.computeEffectiveAddress(
+ Address(masm.getStackPointer(), -int32_t(sizeof(wasm::Frame))),
+ ScratchReg);
+ masm.storePtr(
+ ScratchReg,
+ Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendableExitFP()));
+
+ masm.mov(&returnCallsite, ReturnAddressReg);
+
+ // Call wasm function fast.
+# ifdef JS_USE_LINK_REGISTER
+ masm.mov(ReturnAddressReg, lr);
+# else
+ masm.Push(ReturnAddressReg);
+# endif
+ // Get funcUncheckedCallEntry() from the function's
+ // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
+ size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
+ masm.loadPtr(Address(FnReg, uncheckedEntrySlotOffset), ScratchReg);
+ masm.jump(ScratchReg);
+
+ // About to use valid FramePointer -- restore framePushed.
+ masm.setFramePushed(framePushed);
+
+  // For the IsPlausibleStackMapKey check of the following callsite.
+ masm.wasmTrapInstruction();
+
+ // Callsite for return from main stack.
+ masm.bind(&returnCallsite);
+ masm.append(desc, *returnCallsite.target());
+ masm.addCodeLabel(returnCallsite);
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ markSafepointAt(returnCallsite.target()->offset(), lir);
+ lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
+ lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
+ // Rooting SuspenderReg.
+ masm.propagateOOM(
+ lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));
+
+ masm.freeStackTo(framePushed);
+
+ masm.freeStack(reserve);
+ masm.Pop(InstanceReg);
+ masm.Pop(SuspenderReg);
+
+ // Using SuspenderDataReg and DataReg as temps.
+ masm.switchToWasmInstanceRealm(SuspenderDataReg, DataReg);
+
+ callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Leave,
+ SuspenderReg);
+#else
+ MOZ_CRASH("NYI");
+#endif // ENABLE_WASM_JSPI
+}
+
+void CodeGenerator::visitWasmStackSwitchToMain(LWasmStackSwitchToMain* lir) {
+#ifdef ENABLE_WASM_JSPI
+ const Register SuspenderReg = lir->suspender()->toRegister().gpr();
+ const Register FnReg = lir->fn()->toRegister().gpr();
+ const Register DataReg = lir->data()->toRegister().gpr();
+ const Register SuspenderDataReg = ABINonArgReg3;
+
+# ifdef JS_CODEGEN_ARM64
+ vixl::UseScratchRegisterScope temps(&masm);
+ const Register ScratchReg = temps.AcquireX().asUnsized();
+# elif !defined(JS_CODEGEN_X64)
+# error "NYI: scratch register"
+# endif
+
+ masm.Push(SuspenderReg);
+ masm.Push(FnReg);
+ masm.Push(DataReg);
+
+ callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Suspend,
+ SuspenderReg);
+
+ masm.Pop(DataReg);
+ masm.Pop(FnReg);
+ masm.Pop(SuspenderReg);
+
+ masm.Push(SuspenderReg);
+ int32_t framePushedAtSuspender = masm.framePushed();
+ masm.Push(InstanceReg);
+
+ wasm::CallSiteDesc desc(wasm::CallSiteDesc::Kind::StackSwitch);
+ CodeLabel returnCallsite;
+
+ // Aligning stack before trampoline call.
+ uint32_t reserve = ComputeByteAlignment(
+ masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
+ masm.reserveStack(reserve);
+
+ masm.loadPrivate(Address(SuspenderReg, NativeObject::getFixedSlotOffset(
+ wasm::SuspenderObjectDataSlot)),
+ SuspenderDataReg);
+
+ // Switch stacks to main.
+ masm.storeStackPtr(Address(
+ SuspenderDataReg, wasm::SuspenderObjectData::offsetOfSuspendableSP()));
+ masm.storePtr(FramePointer,
+ Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendableFP()));
+
+ masm.loadStackPtr(
+ Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainSP()));
+ masm.loadPtr(
+ Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainFP()),
+ FramePointer);
+
+ // Set main_ra field to returnCallsite.
+ masm.mov(&returnCallsite, ScratchReg);
+ masm.storePtr(
+ ScratchReg,
+ Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendedReturnAddress()));
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+  // The FramePointer points to the same place as before the switch
+  // happened.
+ uint32_t framePushed = masm.framePushed();
+
+ // On different stack, reset framePushed. FramePointer is not valid here.
+ masm.setFramePushed(0);
+
+ // Pass the suspender and data params through the wasm function ABI registers.
+ WasmABIArgGenerator abi;
+ ABIArg arg;
+ arg = abi.next(MIRType::Pointer);
+ MOZ_RELEASE_ASSERT(arg.kind() == ABIArg::GPR);
+ masm.movePtr(SuspenderReg, arg.gpr());
+ arg = abi.next(MIRType::Pointer);
+ MOZ_RELEASE_ASSERT(arg.kind() == ABIArg::GPR);
+ masm.movePtr(DataReg, arg.gpr());
+ unsigned reserveBeforeCall = abi.stackBytesConsumedSoFar();
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ unsigned argDecrement =
+ StackDecrementForCall(WasmStackAlignment, 0, reserveBeforeCall);
+ masm.reserveStack(argDecrement);
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCallerInstanceOffsetBeforeCall));
+
+ // Get wasm instance pointer for callee.
+ size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_INSTANCE_SLOT);
+ masm.loadPtr(Address(FnReg, instanceSlotOffset), InstanceReg);
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCalleeInstanceOffsetBeforeCall));
+ masm.loadWasmPinnedRegsFromInstance();
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ const Register ReturnAddressReg = ScratchReg;
+
+  // Load the caller's instance from the suspendable stack exit frame.
+ masm.loadPtr(Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendableExitFP()),
+ ScratchReg);
+ masm.loadPtr(
+ Address(ScratchReg, wasm::FrameWithInstances::callerInstanceOffset()),
+ ScratchReg);
+ masm.storePtr(ScratchReg, Address(masm.getStackPointer(),
+ WasmCallerInstanceOffsetBeforeCall));
+
+ // Load RA from suspendable stack exit frame.
+ masm.loadPtr(Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendableExitFP()),
+ ScratchReg);
+ masm.loadPtr(Address(ScratchReg, wasm::Frame::returnAddressOffset()),
+ ReturnAddressReg);
+
+  // Call the wasm function through its fast (unchecked) entry.
+# ifdef JS_USE_LINK_REGISTER
+ masm.mov(ReturnAddressReg, lr);
+# else
+ masm.Push(ReturnAddressReg);
+# endif
+ // Get funcUncheckedCallEntry() from the function's
+ // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
+ size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
+ masm.loadPtr(Address(FnReg, uncheckedEntrySlotOffset), ScratchReg);
+ masm.jump(ScratchReg);
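+  // The jump above does not fall through; the code below is only reached via
+  // returnCallsite.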
+
+ // About to use valid FramePointer -- restore framePushed.
+ masm.setFramePushed(framePushed);
+
+  // Emit a trap instruction so IsPlausibleStackMapKey accepts the following
+  // callsite.
+ masm.wasmTrapInstruction();
+
+ // Callsite for return from suspendable stack.
+ masm.bind(&returnCallsite);
+ masm.append(desc, *returnCallsite.target());
+ masm.addCodeLabel(returnCallsite);
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ markSafepointAt(returnCallsite.target()->offset(), lir);
+ lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
+ lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
+ // Rooting SuspenderReg.
+ masm.propagateOOM(
+ lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));
+
+ masm.freeStackTo(framePushed);
+
+ masm.freeStack(reserve);
+ masm.Pop(InstanceReg);
+ masm.Pop(SuspenderReg);
+
+ // Using SuspenderDataReg and DataReg as temps.
+ masm.switchToWasmInstanceRealm(SuspenderDataReg, DataReg);
+
+ callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Resume,
+ SuspenderReg);
+#else
+ MOZ_CRASH("NYI");
+#endif // ENABLE_WASM_JSPI
+}
+
+void CodeGenerator::visitWasmStackContinueOnSuspendable(
+ LWasmStackContinueOnSuspendable* lir) {
+#ifdef ENABLE_WASM_JSPI
+ const Register SuspenderReg = lir->suspender()->toRegister().gpr();
+ const Register SuspenderDataReg = ABINonArgReg3;
+
+# ifdef JS_CODEGEN_ARM64
+ vixl::UseScratchRegisterScope temps(&masm);
+ const Register ScratchReg = temps.AcquireX().asUnsized();
+# elif !defined(JS_CODEGEN_X64)
+# error "NYI: scratch register"
+# endif
+
+ masm.Push(SuspenderReg);
+ int32_t framePushedAtSuspender = masm.framePushed();
+ masm.Push(InstanceReg);
+
+ wasm::CallSiteDesc desc(wasm::CallSiteDesc::Kind::StackSwitch);
+ CodeLabel returnCallsite;
+
+  // Align the stack before the trampoline call.
+ uint32_t reserve = ComputeByteAlignment(
+ masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
+ masm.reserveStack(reserve);
+
+ masm.loadPrivate(Address(SuspenderReg, NativeObject::getFixedSlotOffset(
+ wasm::SuspenderObjectDataSlot)),
+ SuspenderDataReg);
+ masm.storeStackPtr(
+ Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainSP()));
+ masm.storePtr(
+ FramePointer,
+ Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainFP()));
+
+ // Adjust exit frame FP.
+ masm.loadPtr(Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendableExitFP()),
+ ScratchReg);
+ masm.storePtr(FramePointer,
+ Address(ScratchReg, wasm::Frame::callerFPOffset()));
+
+ // Adjust exit frame RA.
+ const Register TempReg = SuspenderDataReg;
+ masm.Push(TempReg);
+ masm.mov(&returnCallsite, TempReg);
+ masm.storePtr(TempReg,
+ Address(ScratchReg, wasm::Frame::returnAddressOffset()));
+ masm.Pop(TempReg);
+ // Adjust exit frame caller instance slot.
+ masm.storePtr(
+ InstanceReg,
+ Address(ScratchReg, wasm::FrameWithInstances::callerInstanceOffset()));
+
+ // Switch stacks to suspendable.
+ masm.loadStackPtr(Address(
+ SuspenderDataReg, wasm::SuspenderObjectData::offsetOfSuspendableSP()));
+ masm.loadPtr(Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendableFP()),
+ FramePointer);
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+  // The FramePointer points to the same place as before the switch happened.
+ uint32_t framePushed = masm.framePushed();
+
+ // On different stack, reset framePushed. FramePointer is not valid here.
+ masm.setFramePushed(0);
+
+ // Restore shadow stack area and instance slots.
+ WasmABIArgGenerator abi;
+ unsigned reserveBeforeCall = abi.stackBytesConsumedSoFar();
+ MOZ_ASSERT(masm.framePushed() == 0);
+ unsigned argDecrement =
+ StackDecrementForCall(WasmStackAlignment, 0, reserveBeforeCall);
+ masm.reserveStack(argDecrement);
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCallerInstanceOffsetBeforeCall));
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCalleeInstanceOffsetBeforeCall));
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ const Register ReturnAddressReg = ScratchReg;
+
+ // Pretend we just returned from the function.
+ masm.loadPtr(
+ Address(SuspenderDataReg,
+ wasm::SuspenderObjectData::offsetOfSuspendedReturnAddress()),
+ ReturnAddressReg);
+ masm.jump(ReturnAddressReg);
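+  // The code below is only reached via returnCallsite.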
+
+ // About to use valid FramePointer -- restore framePushed.
+ masm.setFramePushed(framePushed);
+
+  // Emit a trap instruction so IsPlausibleStackMapKey accepts the following
+  // callsite.
+ masm.wasmTrapInstruction();
+
+ // Callsite for return from suspendable stack.
+ masm.bind(&returnCallsite);
+ masm.append(desc, *returnCallsite.target());
+ masm.addCodeLabel(returnCallsite);
+
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ markSafepointAt(returnCallsite.target()->offset(), lir);
+ lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
+ lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
+ // Rooting SuspenderReg.
+ masm.propagateOOM(
+ lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));
+
+ masm.freeStackTo(framePushed);
+
+ masm.freeStack(reserve);
+ masm.Pop(InstanceReg);
+ masm.Pop(SuspenderReg);
+
+ // Using SuspenderDataReg and ABINonArgReg2 as temps.
+ masm.switchToWasmInstanceRealm(SuspenderDataReg, ABINonArgReg2);
+
+ callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Leave,
+ SuspenderReg);
+#else
+ MOZ_CRASH("NYI");
+#endif // ENABLE_WASM_JSPI
+}
+
void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
LBlock* block = lir->block();
MWasmCallLandingPrePad* mir = lir->mir();
@@ -11319,6 +11905,7 @@ void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
masm.bind(&notPointerEqual);
Label setNotEqualResult;
+
if (str->isAtom()) {
// Atoms cannot be equal to each other if they point to different strings.
Imm32 atomBit(JSString::ATOM_BIT);
@@ -11336,8 +11923,27 @@ void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
}
// Strings of different length can never be equal.
- masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
- Imm32(str->length()), &compareChars);
+ masm.branch32(Assembler::NotEqual,
+ Address(input, JSString::offsetOfLength()),
+ Imm32(str->length()), &setNotEqualResult);
+
+ if (str->isAtom()) {
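+    // Try to atomize the input cheaply; if that fails, fall back to comparing
+    // characters.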
+ Label forwardedPtrEqual;
+ masm.tryFastAtomize(input, output, output, &compareChars);
+
+ // We now have two atoms. Just check pointer equality.
+ masm.branchPtr(Assembler::Equal, output, ImmGCPtr(str),
+ &forwardedPtrEqual);
+
+ masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&forwardedPtrEqual);
+ masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
+ masm.jump(ool->rejoin());
+ } else {
+ masm.jump(&compareChars);
+ }
masm.bind(&setNotEqualResult);
masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
@@ -12506,6 +13112,14 @@ void CodeGenerator::visitSubstr(LSubstr* lir) {
masm.storeDependentStringBase(string, output);
auto initializeDependentString = [&](CharEncoding encoding) {
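+    // Mark the base string as depended on, unless it is an atom.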
+ masm.loadPtr(Address(string, JSString::offsetOfFlags()), temp0);
+ Label skipDependedOn;
+ masm.branchTest32(Assembler::NonZero, temp0, Imm32(JSString::ATOM_BIT),
+ &skipDependedOn);
+ masm.or32(Imm32(JSString::DEPENDED_ON_BIT), temp0);
+ masm.store32(temp0, Address(string, JSString::offsetOfFlags()));
+ masm.bind(&skipDependedOn);
+
uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
if (encoding == CharEncoding::Latin1) {
flags |= JSString::LATIN1_CHARS_BIT;
@@ -15325,6 +15939,7 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
GeneralRegisterForwardIterator refRegsIter(refRegs);
switch (safepoint.wasmSafepointKind()) {
case WasmSafepointKind::LirCall:
+ case WasmSafepointKind::StackSwitch:
case WasmSafepointKind::CodegenCall: {
size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
regDumpWords += spilledNumWords;
@@ -16317,9 +16932,26 @@ void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
OutOfLineAtomizeSlot* ool =
new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
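+  // If the string is already an atom, there is nothing to do.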
+ masm.branchTest32(Assembler::NonZero,
+ Address(stringReg, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_BIT), ool->rejoin());
+
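+  // Strings that are not atom refs are atomized in the out-of-line path;
+  // atom refs let us substitute their atom directly.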
masm.branchTest32(Assembler::Zero,
Address(stringReg, JSString::offsetOfFlags()),
- Imm32(JSString::ATOM_BIT), ool->entry());
+ Imm32(JSString::ATOM_REF_BIT), ool->entry());
+ masm.loadPtr(Address(stringReg, JSAtomRefString::offsetOfAtom()), stringReg);
+
+ if (dest.hasValue()) {
+ masm.moveValue(
+ TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
+ dest.valueReg());
+ } else {
+ MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
+ }
+
+ emitPreBarrier(slotAddr);
+ masm.storeTypedOrValue(dest, slotAddr);
+
masm.bind(ool->rejoin());
}
@@ -20474,7 +21106,7 @@ void CodeGenerator::visitToHashableString(LToHashableString* ins) {
Address(input, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &isAtom);
- masm.lookupStringInAtomCacheLastLookups(input, output, output, ool->entry());
+ masm.tryFastAtomize(input, output, output, ool->entry());
masm.jump(ool->rejoin());
masm.bind(&isAtom);
masm.movePtr(input, output);
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
index 282771a79e..c883569193 100644
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -109,6 +109,11 @@ class CodeGenerator final : public CodeGeneratorSpecific {
const StoreOutputTo& out);
template <typename LCallIns>
+ void emitCallNative(LCallIns* call, JSNative native, Register argContextReg,
+ Register argUintNReg, Register argVpReg, Register tempReg,
+ uint32_t unusedStack);
+
+ template <typename LCallIns>
void emitCallNative(LCallIns* call, JSNative native);
public:
@@ -204,6 +209,11 @@ class CodeGenerator final : public CodeGeneratorSpecific {
wasm::BytecodeOffset bytecodeOffset);
void visitOutOfLineWasmNewArray(OutOfLineWasmNewArray* ool);
+#ifdef ENABLE_WASM_JSPI
+ void callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction kind,
+ Register suspender);
+#endif
+
private:
void emitPostWriteBarrier(const LAllocation* obj);
void emitPostWriteBarrier(Register objreg);
@@ -248,7 +258,7 @@ class CodeGenerator final : public CodeGeneratorSpecific {
template <typename T>
void emitApplyNative(T* apply);
template <typename T>
- void emitCallInvokeNativeFunction(T* apply);
+ void emitAlignStackForApplyNative(T* apply, Register argc);
template <typename T>
void emitPushNativeArguments(T* apply);
template <typename T>
diff --git a/js/src/jit/CompileInfo.h b/js/src/jit/CompileInfo.h
index 1980a3d905..eff46d6574 100644
--- a/js/src/jit/CompileInfo.h
+++ b/js/src/jit/CompileInfo.h
@@ -146,6 +146,7 @@ class CompileInfo {
hadReorderingBailout_(false),
hadBoundsCheckBailout_(false),
hadUnboxFoldingBailout_(false),
+ branchHintingEnabled_(false),
mayReadFrameArgsDirectly_(false),
anyFormalIsForwarded_(false),
inlineScriptTree_(nullptr),
@@ -339,6 +340,12 @@ class CompileInfo {
bool hadBoundsCheckBailout() const { return hadBoundsCheckBailout_; }
bool hadUnboxFoldingBailout() const { return hadUnboxFoldingBailout_; }
+ bool branchHintingEnabled() const {
+ return compilingWasm() && branchHintingEnabled_;
+ }
+
+ void setBranchHinting(bool value) { branchHintingEnabled_ = value; }
+
bool mayReadFrameArgsDirectly() const { return mayReadFrameArgsDirectly_; }
bool anyFormalIsForwarded() const { return anyFormalIsForwarded_; }
@@ -370,6 +377,8 @@ class CompileInfo {
bool hadBoundsCheckBailout_;
bool hadUnboxFoldingBailout_;
+ bool branchHintingEnabled_;
+
bool mayReadFrameArgsDirectly_;
bool anyFormalIsForwarded_;
diff --git a/js/src/jit/GenerateCacheIRFiles.py b/js/src/jit/GenerateCacheIRFiles.py
index d71c70b753..9baf3b7a27 100644
--- a/js/src/jit/GenerateCacheIRFiles.py
+++ b/js/src/jit/GenerateCacheIRFiles.py
@@ -79,6 +79,8 @@ arg_writer_info = {
"DoubleField": ("double", "writeDoubleField"),
"AllocSiteField": ("gc::AllocSite*", "writeAllocSiteField"),
"JSOpImm": ("JSOp", "writeJSOpImm"),
+ "JSTypeImm": ("JSType", "writeJSTypeImm"),
+ "TypeofEqOperandImm": ("TypeofEqOperand", "writeTypeofEqOperandImm"),
"BoolImm": ("bool", "writeBoolImm"),
"ByteImm": ("uint32_t", "writeByteImm"), # uint32_t to enable fits-in-byte asserts.
"GuardClassKindImm": ("GuardClassKind", "writeGuardClassKindImm"),
@@ -182,6 +184,8 @@ arg_reader_info = {
"DoubleField": ("uint32_t", "Offset", "reader.stubOffset()"),
"AllocSiteField": ("uint32_t", "Offset", "reader.stubOffset()"),
"JSOpImm": ("JSOp", "", "reader.jsop()"),
+ "JSTypeImm": ("JSType", "", "reader.jstype()"),
+ "TypeofEqOperandImm": ("TypeofEqOperand", "", "reader.typeofEqOperand()"),
"BoolImm": ("bool", "", "reader.readBool()"),
"ByteImm": ("uint8_t", "", "reader.readByte()"),
"GuardClassKindImm": ("GuardClassKind", "", "reader.guardClassKind()"),
@@ -275,6 +279,8 @@ arg_spewer_method = {
"DoubleField": "spewField",
"AllocSiteField": "spewField",
"JSOpImm": "spewJSOpImm",
+ "JSTypeImm": "spewJSTypeImm",
+ "TypeofEqOperandImm": "spewTypeofEqOperandImm",
"BoolImm": "spewBoolImm",
"ByteImm": "spewByteImm",
"GuardClassKindImm": "spewGuardClassKindImm",
@@ -420,6 +426,8 @@ arg_length = {
"ScalarTypeImm": 1,
"UnaryMathFunctionImm": 1,
"JSOpImm": 1,
+ "JSTypeImm": 1,
+ "TypeofEqOperandImm": 1,
"ValueTypeImm": 1,
"GuardClassKindImm": 1,
"ArrayBufferViewKindImm": 1,
diff --git a/js/src/jit/InterpreterEntryTrampoline.cpp b/js/src/jit/InterpreterEntryTrampoline.cpp
index 2e662af559..4dc196476c 100644
--- a/js/src/jit/InterpreterEntryTrampoline.cpp
+++ b/js/src/jit/InterpreterEntryTrampoline.cpp
@@ -40,19 +40,18 @@ void EntryTrampolineMap::updateScriptsAfterMovingGC(void) {
}
#ifdef JSGC_HASH_TABLE_CHECKS
-void EntryTrampoline::checkTrampolineAfterMovingGC() {
+void EntryTrampoline::checkTrampolineAfterMovingGC() const {
JitCode* trampoline = entryTrampoline_;
CheckGCThingAfterMovingGC(trampoline);
}
void EntryTrampolineMap::checkScriptsAfterMovingGC() {
- for (jit::EntryTrampolineMap::Enum r(*this); !r.empty(); r.popFront()) {
- BaseScript* script = r.front().key();
+ gc::CheckTableAfterMovingGC(*this, [](const auto& entry) {
+ BaseScript* script = entry.key();
CheckGCThingAfterMovingGC(script);
- r.front().value().checkTrampolineAfterMovingGC();
- auto ptr = lookup(script);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
+ entry.value().checkTrampolineAfterMovingGC();
+ return script;
+ });
}
#endif
diff --git a/js/src/jit/InterpreterEntryTrampoline.h b/js/src/jit/InterpreterEntryTrampoline.h
index 4f49f3fe13..2c7d658043 100644
--- a/js/src/jit/InterpreterEntryTrampoline.h
+++ b/js/src/jit/InterpreterEntryTrampoline.h
@@ -58,7 +58,7 @@ class EntryTrampoline {
}
#ifdef JSGC_HASH_TABLE_CHECKS
- void checkTrampolineAfterMovingGC();
+ void checkTrampolineAfterMovingGC() const;
#endif
};
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
index e209ace846..03965d3d60 100644
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -20,6 +20,7 @@
#include "jit/BacktrackingAllocator.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
+#include "jit/BranchHinting.h"
#include "jit/CodeGenerator.h"
#include "jit/CompileInfo.h"
#include "jit/EdgeCaseAnalysis.h"
@@ -1223,6 +1224,19 @@ bool OptimizeMIR(MIRGenerator* mir) {
}
}
+ if (mir->branchHintingEnabled()) {
+ JitSpewCont(JitSpew_BranchHint, "\n");
+ if (!BranchHinting(mir, graph)) {
+ return false;
+ }
+ gs.spewPass("BranchHinting");
+ AssertBasicGraphCoherency(graph);
+
+ if (mir->shouldCancel("BranchHinting")) {
+ return false;
+ }
+ }
+
// LICM can hoist instructions from conditional branches and
// trigger bailouts. Disable it if bailing out of a hoisted
// instruction has previously invalidated this script.
diff --git a/js/src/jit/IonCacheIRCompiler.cpp b/js/src/jit/IonCacheIRCompiler.cpp
index 948cb38011..89fa96c758 100644
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -564,6 +564,7 @@ bool IonCacheIRCompiler::init() {
}
case CacheKind::Call:
case CacheKind::TypeOf:
+ case CacheKind::TypeOfEq:
case CacheKind::ToBool:
case CacheKind::GetIntrinsic:
case CacheKind::NewArray:
@@ -677,6 +678,7 @@ void IonCacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
break;
case CacheKind::Call:
case CacheKind::TypeOf:
+ case CacheKind::TypeOfEq:
case CacheKind::ToBool:
case CacheKind::GetIntrinsic:
case CacheKind::NewArray:
diff --git a/js/src/jit/IonIC.cpp b/js/src/jit/IonIC.cpp
index 55f3bbea6c..9ecbf1fd24 100644
--- a/js/src/jit/IonIC.cpp
+++ b/js/src/jit/IonIC.cpp
@@ -73,6 +73,7 @@ Register IonIC::scratchRegisterForEntryJump() {
return asOptimizeGetIteratorIC()->temp();
case CacheKind::Call:
case CacheKind::TypeOf:
+ case CacheKind::TypeOfEq:
case CacheKind::ToBool:
case CacheKind::GetIntrinsic:
case CacheKind::NewArray:
@@ -364,7 +365,7 @@ bool IonGetNameIC::update(JSContext* cx, HandleScript outerScript,
return false;
}
- if (JSOp(*GetNextPc(pc)) == JSOp::Typeof) {
+ if (IsTypeOfNameOp(JSOp(*GetNextPc(pc)))) {
return FetchName<GetNameMode::TypeOf>(cx, obj, holder, name, prop, res);
}
diff --git a/js/src/jit/IonTypes.h b/js/src/jit/IonTypes.h
index 5c7818e533..0bae1261c0 100644
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -734,6 +734,9 @@ static inline MIRType ScalarTypeToMIRType(Scalar::Type type) {
return MIRType::Int32;
case Scalar::Int64:
return MIRType::Int64;
+ case Scalar::Float16:
+ // TODO: See Bug 1835034 for JIT support for Float16Array
+ MOZ_CRASH("NYI");
case Scalar::Float32:
return MIRType::Float32;
case Scalar::Float64:
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
index 45ac1f5def..69c259994e 100644
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -11,6 +11,7 @@
#include <algorithm>
#include "builtin/ModuleObject.h"
+#include "builtin/Sorting.h"
#include "gc/GC.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineIC.h"
@@ -35,6 +36,7 @@
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmInstance.h"
+#include "builtin/Sorting-inl.h"
#include "debugger/DebugAPI-inl.h"
#include "jit/JSJitFrameIter-inl.h"
#include "vm/GeckoProfiler-inl.h"
@@ -97,6 +99,7 @@ static void UnwindTrampolineNativeFrame(JSRuntime* rt,
TrampolineNative native = TrampolineNativeForFrame(rt, layout);
switch (native) {
case TrampolineNative::ArraySort:
+ case TrampolineNative::TypedArraySort:
layout->getFrameData<ArraySortData>()->freeMallocData();
break;
case TrampolineNative::Count:
@@ -225,13 +228,8 @@ static void OnLeaveIonFrame(JSContext* cx, const InlineFrameIterator& frame,
RematerializedFrame* rematFrame = nullptr;
{
JS::AutoSaveExceptionState savedExc(cx);
-
- // We can run recover instructions without invalidating because we're
- // already leaving the frame.
- MaybeReadFallback::FallbackConsequence consequence =
- MaybeReadFallback::Fallback_DoNothing;
rematFrame = act->getRematerializedFrame(cx, frame.frame(), frame.frameNo(),
- consequence);
+ IsLeavingFrame::Yes);
if (!rematFrame) {
return;
}
@@ -1431,6 +1429,7 @@ static void TraceTrampolineNativeFrame(JSTracer* trc,
TrampolineNative native = TrampolineNativeForFrame(trc->runtime(), layout);
switch (native) {
case TrampolineNative::ArraySort:
+ case TrampolineNative::TypedArraySort:
layout->getFrameData<ArraySortData>()->trace(trc);
break;
case TrampolineNative::Count:
@@ -1503,6 +1502,11 @@ static void TraceJitActivation(JSTracer* trc, JitActivation* activation) {
uint8_t* nextPC = frames.resumePCinCurrentFrame();
MOZ_ASSERT(nextPC != 0);
wasm::WasmFrameIter& wasmFrameIter = frames.asWasm();
+#ifdef ENABLE_WASM_JSPI
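+    // Frames after a stack switch live on a separate stack, so the previous
+    // watermark does not apply; reset it.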
+ if (wasmFrameIter.stackSwitched()) {
+ highestByteVisitedInPrevWasmFrame = 0;
+ }
+#endif
wasm::Instance* instance = wasmFrameIter.instance();
wasm::TraceInstanceEdge(trc, instance, "WasmFrameIter instance");
highestByteVisitedInPrevWasmFrame = instance->traceFrame(
@@ -1516,6 +1520,9 @@ void TraceJitActivations(JSContext* cx, JSTracer* trc) {
++activations) {
TraceJitActivation(trc, activations->asJit());
}
+#ifdef ENABLE_WASM_JSPI
+ cx->wasm().promiseIntegration.traceRoots(trc);
+#endif
}
void TraceWeakJitActivationsInSweepingZones(JSContext* cx, JSTracer* trc) {
diff --git a/js/src/jit/JitRuntime.h b/js/src/jit/JitRuntime.h
index 383efca437..2603bde523 100644
--- a/js/src/jit/JitRuntime.h
+++ b/js/src/jit/JitRuntime.h
@@ -43,6 +43,7 @@ namespace js {
class AutoLockHelperThreadState;
class GCMarker;
+enum class ArraySortKind;
namespace jit {
@@ -307,7 +308,8 @@ class JitRuntime {
void generateTrampolineNatives(MacroAssembler& masm,
TrampolineNativeJitEntryOffsets& offsets,
PerfSpewerRangeRecorder& rangeRecorder);
- uint32_t generateArraySortTrampoline(MacroAssembler& masm);
+ uint32_t generateArraySortTrampoline(MacroAssembler& masm,
+ ArraySortKind kind);
void bindLabelToOffset(Label* label, uint32_t offset) {
MOZ_ASSERT(!trampolineCode_);
diff --git a/js/src/jit/JitScript.cpp b/js/src/jit/JitScript.cpp
index 62a14a70b6..df7557b7ac 100644
--- a/js/src/jit/JitScript.cpp
+++ b/js/src/jit/JitScript.cpp
@@ -703,9 +703,12 @@ void jit::JitSpewBaselineICStats(JSScript* script, const char* dumpReason) {
}
#endif
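+
+// Map from an active CacheIR stub to its clone, so that a stub reachable from
+// multiple frames is cloned only once.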
+using StubHashMap = HashMap<ICCacheIRStub*, ICCacheIRStub*,
+ DefaultHasher<ICCacheIRStub*>, SystemAllocPolicy>;
+
static void MarkActiveICScriptsAndCopyStubs(
JSContext* cx, const JitActivationIterator& activation,
- ICStubSpace& newStubSpace) {
+ ICStubSpace& newStubSpace, StubHashMap& alreadyClonedStubs) {
for (OnlyJSJitFrameIter iter(activation); !iter.done(); ++iter) {
const JSJitFrameIter& frame = iter.frame();
switch (frame.type()) {
@@ -721,8 +724,15 @@ static void MarkActiveICScriptsAndCopyStubs(
auto* layout = reinterpret_cast<BaselineStubFrameLayout*>(frame.fp());
if (layout->maybeStubPtr() && !layout->maybeStubPtr()->isFallback()) {
ICCacheIRStub* stub = layout->maybeStubPtr()->toCacheIRStub();
- ICCacheIRStub* newStub = stub->clone(cx->runtime(), newStubSpace);
- layout->setStubPtr(newStub);
+ auto lookup = alreadyClonedStubs.lookupForAdd(stub);
+ if (!lookup) {
+ ICCacheIRStub* newStub = stub->clone(cx->runtime(), newStubSpace);
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!alreadyClonedStubs.add(lookup, stub, newStub)) {
+ oomUnsafe.crash("MarkActiveICScriptsAndCopyStubs");
+ }
+ }
+ layout->setStubPtr(lookup->value());
// If this is a trial-inlining call site, also preserve the callee
// ICScript. Inlined constructor calls invoke CreateThisFromIC (which
@@ -772,10 +782,12 @@ void jit::MarkActiveICScriptsAndCopyStubs(Zone* zone,
if (zone->isAtomsZone()) {
return;
}
+ StubHashMap alreadyClonedStubs;
JSContext* cx = TlsContext.get();
for (JitActivationIterator iter(cx); !iter.done(); ++iter) {
if (iter->compartment()->zone() == zone) {
- MarkActiveICScriptsAndCopyStubs(cx, iter, newStubSpace);
+ MarkActiveICScriptsAndCopyStubs(cx, iter, newStubSpace,
+ alreadyClonedStubs);
}
}
}
diff --git a/js/src/jit/JitSpewer.cpp b/js/src/jit/JitSpewer.cpp
index 11e3165240..4eb31b2089 100644
--- a/js/src/jit/JitSpewer.cpp
+++ b/js/src/jit/JitSpewer.cpp
@@ -360,6 +360,7 @@ static void PrintHelpAndExit(int status = 0) {
" pools Literal Pools (ARM only for now)\n"
" cacheflush Instruction Cache flushes (ARM only for now)\n"
" range Range Analysis\n"
+ " branch-hint Wasm Branch Hinting\n"
" wasmbce Wasm Bounds Check Elimination\n"
" shapeguards Redundant shape guard elimination\n"
" gcbarriers Redundant GC barrier elimination\n"
@@ -432,6 +433,8 @@ void jit::CheckLogging() {
EnableChannel(JitSpew_Range);
} else if (IsFlag(found, "wasmbce")) {
EnableChannel(JitSpew_WasmBCE);
+ } else if (IsFlag(found, "branch-hint")) {
+ EnableChannel(JitSpew_BranchHint);
} else if (IsFlag(found, "licm")) {
EnableChannel(JitSpew_LICM);
} else if (IsFlag(found, "flac")) {
diff --git a/js/src/jit/JitSpewer.h b/js/src/jit/JitSpewer.h
index bfc92c74f2..2b332d117c 100644
--- a/js/src/jit/JitSpewer.h
+++ b/js/src/jit/JitSpewer.h
@@ -41,6 +41,8 @@ namespace jit {
_(Range) \
/* Information during LICM */ \
_(LICM) \
+ /* Information during Branch Hinting */ \
+ _(BranchHint) \
/* Info about fold linear constants */ \
_(FLAC) \
/* Effective address analysis info */ \
diff --git a/js/src/jit/LIR.h b/js/src/jit/LIR.h
index 30a35b0dea..1b1b107557 100644
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -1422,6 +1422,8 @@ enum class WasmSafepointKind : uint8_t {
// For resumable wasm traps where registers will be spilled by the trap
// handler.
Trap,
+ // For stack switch call.
+ StackSwitch,
};
class LSafepoint : public TempObject {
diff --git a/js/src/jit/LIROps.yaml b/js/src/jit/LIROps.yaml
index 880e756f74..dccb455302 100644
--- a/js/src/jit/LIROps.yaml
+++ b/js/src/jit/LIROps.yaml
@@ -358,6 +358,25 @@
- name: InterruptCheck
mir_op: true
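+
+# Stack-switch operations used by the wasm JS Promise Integration (JSPI)
+# proposal.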
+- name: WasmStackSwitchToMain
+ operands:
+ suspender: WordSized
+ fn: WordSized
+ data: WordSized
+ call_instruction: true
+
+- name: WasmStackSwitchToSuspendable
+ operands:
+ suspender: WordSized
+ fn: WordSized
+ data: WordSized
+ call_instruction: true
+
+- name: WasmStackContinueOnSuspendable
+ operands:
+ suspender: WordSized
+ call_instruction: true
+
- name: WasmInterruptCheck
operands:
instance: WordSized
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index f7b898f240..f84ed576b4 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -654,7 +654,6 @@ void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
auto argc = useFixedAtStart(apply->getArgc(), CallTempReg0);
auto thisValue =
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
@@ -665,9 +664,13 @@ void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
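+    // The callee is a statically known native, so no function register is
+    // needed; CallTempReg3 is free to serve as a temp.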
+ auto temp = tempFixed(CallTempReg3);
+
lir = new (alloc())
- LApplyArgsNative(function, argc, thisValue, tempObj, tempCopy);
+ LApplyArgsNative(argc, thisValue, tempObj, tempCopy, temp);
} else {
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+
lir = new (alloc())
LApplyArgsGeneric(function, argc, thisValue, tempObj, tempCopy);
}
@@ -686,7 +689,6 @@ void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
auto argsObj = useFixedAtStart(apply->getArgsObj(), CallTempReg0);
auto thisValue =
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
@@ -697,9 +699,13 @@ void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
+ auto temp = tempFixed(CallTempReg3);
+
lir = new (alloc())
- LApplyArgsObjNative(function, argsObj, thisValue, tempObj, tempCopy);
+ LApplyArgsObjNative(argsObj, thisValue, tempObj, tempCopy, temp);
} else {
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+
lir = new (alloc())
LApplyArgsObj(function, argsObj, thisValue, tempObj, tempCopy);
}
@@ -718,7 +724,6 @@ void LIRGenerator::visitApplyArray(MApplyArray* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
auto elements = useFixedAtStart(apply->getElements(), CallTempReg0);
auto thisValue =
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
@@ -729,9 +734,13 @@ void LIRGenerator::visitApplyArray(MApplyArray* apply) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
+ auto temp = tempFixed(CallTempReg3);
+
lir = new (alloc())
- LApplyArrayNative(function, elements, thisValue, tempObj, tempCopy);
+ LApplyArrayNative(elements, thisValue, tempObj, tempCopy, temp);
} else {
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+
lir = new (alloc())
LApplyArrayGeneric(function, elements, thisValue, tempObj, tempCopy);
}
@@ -754,7 +763,6 @@ void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto argc = useFixedAtStart(mir->getArgc(), CallTempReg0);
auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
auto temp = tempFixed(CallTempReg2);
@@ -763,11 +771,13 @@ void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
- auto temp2 = tempFixed(CallTempReg4);
+ auto temp2 = tempFixed(CallTempReg3);
+ auto temp3 = tempFixed(CallTempReg4);
- lir = new (alloc())
- LConstructArgsNative(function, argc, newTarget, temp, temp2);
+ lir =
+ new (alloc()) LConstructArgsNative(argc, newTarget, temp, temp2, temp3);
} else {
+ auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto thisValue =
useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);
@@ -792,7 +802,6 @@ void LIRGenerator::visitConstructArray(MConstructArray* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto elements = useFixedAtStart(mir->getElements(), CallTempReg0);
auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
auto temp = tempFixed(CallTempReg2);
@@ -801,11 +810,13 @@ void LIRGenerator::visitConstructArray(MConstructArray* mir) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
- auto temp2 = tempFixed(CallTempReg4);
+ auto temp2 = tempFixed(CallTempReg3);
+ auto temp3 = tempFixed(CallTempReg4);
lir = new (alloc())
- LConstructArrayNative(function, elements, newTarget, temp, temp2);
+ LConstructArrayNative(elements, newTarget, temp, temp2, temp3);
} else {
+ auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto thisValue =
useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);
@@ -6351,6 +6362,48 @@ void LIRGenerator::visitWasmStackResult(MWasmStackResult* ins) {
add(lir, ins);
}
+void LIRGenerator::visitWasmStackSwitchToSuspendable(
+ MWasmStackSwitchToSuspendable* ins) {
+#ifdef ENABLE_WASM_JSPI
+ auto* lir = new (alloc()) LWasmStackSwitchToSuspendable(
+ useFixedAtStart(ins->suspender(), ABINonArgReg0),
+ useFixedAtStart(ins->fn(), ABINonArgReg1),
+ useFixedAtStart(ins->data(), ABINonArgReg2));
+
+ add(lir, ins);
+ assignWasmSafepoint(lir);
+#else
+ MOZ_CRASH("NYI");
+#endif
+}
+
+void LIRGenerator::visitWasmStackSwitchToMain(MWasmStackSwitchToMain* ins) {
+#ifdef ENABLE_WASM_JSPI
+ auto* lir = new (alloc())
+ LWasmStackSwitchToMain(useFixedAtStart(ins->suspender(), ABINonArgReg0),
+ useFixedAtStart(ins->fn(), ABINonArgReg1),
+ useFixedAtStart(ins->data(), ABINonArgReg2));
+
+ add(lir, ins);
+ assignWasmSafepoint(lir);
+#else
+ MOZ_CRASH("NYI");
+#endif
+}
+
+void LIRGenerator::visitWasmStackContinueOnSuspendable(
+ MWasmStackContinueOnSuspendable* ins) {
+#ifdef ENABLE_WASM_JSPI
+ auto* lir = new (alloc()) LWasmStackContinueOnSuspendable(
+ useFixedAtStart(ins->suspender(), ABINonArgReg0));
+
+ add(lir, ins);
+ assignWasmSafepoint(lir);
+#else
+ MOZ_CRASH("NYI");
+#endif
+}
+
template <class MWasmCallT>
void LIRGenerator::visitWasmCall(MWasmCallT ins) {
bool needsBoundsCheck = true;
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
index a74406567b..f826fbd987 100644
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -1122,11 +1122,16 @@ MConstant::MConstant(TempAllocator& alloc, const js::Value& vp)
case MIRType::Double:
payload_.d = vp.toDouble();
break;
- case MIRType::String:
- MOZ_ASSERT(!IsInsideNursery(vp.toString()));
- MOZ_ASSERT(vp.toString()->isLinear());
+ case MIRType::String: {
+ JSString* str = vp.toString();
+ if (str->isAtomRef()) {
+ str = str->atom();
+ }
+ MOZ_ASSERT(!IsInsideNursery(str));
+ MOZ_ASSERT(str->isAtom());
+      payload_.str = str;
break;
+ }
case MIRType::Symbol:
payload_.sym = vp.toSymbol();
break;
@@ -4310,11 +4315,56 @@ static JSType TypeOfName(JSLinearString* str) {
return JSTYPE_LIMIT;
}
-static mozilla::Maybe<std::pair<MTypeOfName*, JSType>> IsTypeOfCompare(
- MCompare* ins) {
+struct TypeOfCompareInput {
+ // The `typeof expr` side of the comparison.
+ // MTypeOfName for JSOp::Typeof/JSOp::TypeofExpr, and
+ // MTypeOf for JSOp::TypeofEq (same pointer as typeOf).
+ MDefinition* typeOfSide;
+
+ // The actual `typeof` operation.
+ MTypeOf* typeOf;
+
+  // The JSType encoded by the constant side of the comparison.
+ JSType type;
+
+  // True if the comparison uses the raw JSType (generated for JSOp::TypeofEq).
+ bool isIntComparison;
+
+ TypeOfCompareInput(MDefinition* typeOfSide, MTypeOf* typeOf, JSType type,
+ bool isIntComparison)
+ : typeOfSide(typeOfSide),
+ typeOf(typeOf),
+ type(type),
+ isIntComparison(isIntComparison) {}
+};
+
+static mozilla::Maybe<TypeOfCompareInput> IsTypeOfCompare(MCompare* ins) {
if (!IsEqualityOp(ins->jsop())) {
return mozilla::Nothing();
}
+
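+  // JSOp::TypeofEq compiles to an Int32 compare of the raw JSType, so
+  // recognize that shape first.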
+ if (ins->compareType() == MCompare::Compare_Int32) {
+ auto* lhs = ins->lhs();
+ auto* rhs = ins->rhs();
+
+ if (ins->type() != MIRType::Boolean || lhs->type() != MIRType::Int32 ||
+ rhs->type() != MIRType::Int32) {
+ return mozilla::Nothing();
+ }
+
+  // NOTE: This comparison is generated by the JIT, and the typeof result is
+  // always on the LHS.
+ if (!lhs->isTypeOf() || !rhs->isConstant()) {
+ return mozilla::Nothing();
+ }
+
+ auto* typeOf = lhs->toTypeOf();
+ auto* constant = rhs->toConstant();
+
+ JSType type = JSType(constant->toInt32());
+ return mozilla::Some(TypeOfCompareInput(typeOf, typeOf, type, true));
+ }
+
if (ins->compareType() != MCompare::Compare_String) {
return mozilla::Nothing();
}
@@ -4335,21 +4385,21 @@ static mozilla::Maybe<std::pair<MTypeOfName*, JSType>> IsTypeOfCompare(
auto* typeOfName =
lhs->isTypeOfName() ? lhs->toTypeOfName() : rhs->toTypeOfName();
- MOZ_ASSERT(typeOfName->input()->isTypeOf());
+ auto* typeOf = typeOfName->input()->toTypeOf();
auto* constant = lhs->isConstant() ? lhs->toConstant() : rhs->toConstant();
JSType type = TypeOfName(&constant->toString()->asLinear());
- return mozilla::Some(std::pair(typeOfName, type));
+ return mozilla::Some(TypeOfCompareInput(typeOfName, typeOf, type, false));
}
bool MCompare::tryFoldTypeOf(bool* result) {
- auto typeOfPair = IsTypeOfCompare(this);
- if (!typeOfPair) {
+ auto typeOfCompare = IsTypeOfCompare(this);
+ if (!typeOfCompare) {
return false;
}
- auto [typeOfName, type] = *typeOfPair;
- auto* typeOf = typeOfName->input()->toTypeOf();
+ auto* typeOf = typeOfCompare->typeOf;
+ JSType type = typeOfCompare->type;
switch (type) {
case JSTYPE_BOOLEAN:
@@ -4639,12 +4689,12 @@ bool MCompare::evaluateConstantOperands(TempAllocator& alloc, bool* result) {
}
MDefinition* MCompare::tryFoldTypeOf(TempAllocator& alloc) {
- auto typeOfPair = IsTypeOfCompare(this);
- if (!typeOfPair) {
+ auto typeOfCompare = IsTypeOfCompare(this);
+ if (!typeOfCompare) {
return this;
}
- auto [typeOfName, type] = *typeOfPair;
- auto* typeOf = typeOfName->input()->toTypeOf();
+ auto* typeOf = typeOfCompare->typeOf;
+ JSType type = typeOfCompare->type;
auto* input = typeOf->input();
MOZ_ASSERT(input->type() == MIRType::Value ||
@@ -4676,10 +4726,15 @@ MDefinition* MCompare::tryFoldTypeOf(TempAllocator& alloc) {
  // In that case it'd be more efficient to emit MTypeOf compared to MTypeOfIs. We
// don't yet handle that case, because it'd require a separate optimization
// pass to correctly detect it.
- if (typeOfName->hasOneUse()) {
+ if (typeOfCompare->typeOfSide->hasOneUse()) {
return MTypeOfIs::New(alloc, input, jsop(), type);
}
+ if (typeOfCompare->isIntComparison) {
+ // Already optimized.
+ return this;
+ }
+
MConstant* cst = MConstant::New(alloc, Int32Value(type));
block()->insertBefore(this, cst);
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index c672092f04..853bf365e2 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -42,6 +42,7 @@
#include "js/ScalarType.h" // js::Scalar::Type
#include "js/Value.h"
#include "js/Vector.h"
+#include "util/DifferentialTesting.h"
#include "vm/BigIntType.h"
#include "vm/EnvironmentObject.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
@@ -11895,7 +11896,13 @@ class MWasmNewStructObject : public MBinaryInstruction,
TRIVIAL_NEW_WRAPPERS
NAMED_OPERANDS((0, instance), (1, typeDefData))
- AliasSet getAliasSet() const override { return AliasSet::None(); }
+ AliasSet getAliasSet() const override {
+ if (js::SupportDifferentialTesting()) {
+ // Consider allocations effectful for differential testing.
+ return MDefinition::getAliasSet();
+ }
+ return AliasSet::None();
+ }
bool isOutline() const { return isOutline_; }
bool zeroFields() const { return zeroFields_; }
gc::AllocKind allocKind() const { return allocKind_; }
@@ -11923,7 +11930,13 @@ class MWasmNewArrayObject : public MTernaryInstruction,
TRIVIAL_NEW_WRAPPERS
NAMED_OPERANDS((0, instance), (1, numElements), (2, typeDefData))
- AliasSet getAliasSet() const override { return AliasSet::None(); }
+ AliasSet getAliasSet() const override {
+ if (js::SupportDifferentialTesting()) {
+ // Consider allocations effectful for differential testing.
+ return MDefinition::getAliasSet();
+ }
+ return AliasSet::None();
+ }
uint32_t elemSize() const { return elemSize_; }
bool zeroFields() const { return zeroFields_; }
wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
diff --git a/js/src/jit/MIRGenerator.h b/js/src/jit/MIRGenerator.h
index 45b33618d0..fc3388793c 100644
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -158,6 +158,7 @@ class MIRGenerator final {
public:
void disableLICM() { disableLICM_ = true; }
bool licmEnabled() const;
+ bool branchHintingEnabled() const;
private:
uint64_t minWasmMemory0Length_;
diff --git a/js/src/jit/MIRGraph.cpp b/js/src/jit/MIRGraph.cpp
index 29b5897a09..47b0ce9b6c 100644
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -47,6 +47,10 @@ bool MIRGenerator::licmEnabled() const {
!outerInfo().hadLICMInvalidation();
}
+bool MIRGenerator::branchHintingEnabled() const {
+ return outerInfo().branchHintingEnabled();
+}
+
mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(AbortReason r) {
if (JitSpewEnabled(JitSpew_IonAbort)) {
switch (r) {
diff --git a/js/src/jit/MIRGraph.h b/js/src/jit/MIRGraph.h
index 18c0bf68a4..4de20a36fa 100644
--- a/js/src/jit/MIRGraph.h
+++ b/js/src/jit/MIRGraph.h
@@ -61,6 +61,9 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock> {
// This block will unconditionally bail out.
bool alwaysBails_ = false;
+  // Branch hint for this block; only set for wasm branch hinting.
+ wasm::BranchHint branchHint_ = wasm::BranchHint::Invalid;
+
// Pushes a copy of a local variable or argument.
void pushVariable(uint32_t slot) { push(slots_[slot]); }
@@ -375,6 +378,15 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock> {
uint32_t id() const { return id_; }
uint32_t numPredecessors() const { return predecessors_.length(); }
+ bool branchHintingUnlikely() const {
+ return branchHint_ == wasm::BranchHint::Unlikely;
+ }
+ bool branchHintingLikely() const {
+ return branchHint_ == wasm::BranchHint::Likely;
+ }
+
+ void setBranchHinting(wasm::BranchHint value) { branchHint_ = value; }
+
uint32_t domIndex() const {
MOZ_ASSERT(!isDead());
return domIndex_;
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
index 78ab989221..96a41cc1ca 100644
--- a/js/src/jit/MIROps.yaml
+++ b/js/src/jit/MIROps.yaml
@@ -3352,6 +3352,25 @@
- name: Rotate
gen_boilerplate: false
+- name: WasmStackSwitchToMain
+ operands:
+ suspender: Object
+ fn: Object
+ data: WasmAnyRef
+ type_policy: none
+
+- name: WasmStackSwitchToSuspendable
+ operands:
+ suspender: Object
+ fn: Object
+ data: WasmAnyRef
+ type_policy: none
+
+- name: WasmStackContinueOnSuspendable
+ operands:
+ suspender: Object
+ type_policy: none
+
- name: WasmBinarySimd128
gen_boilerplate: false
diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h
index e1df31eff9..90bf54bf00 100644
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -484,17 +484,13 @@ void MacroAssembler::branchIfNotFunctionIsNonBuiltinCtor(Register fun,
branch32(Assembler::NotEqual, scratch, Imm32(expected), label);
}
-void MacroAssembler::branchIfFunctionHasNoJitEntry(Register fun,
- bool isConstructing,
- Label* label) {
- uint16_t flags = FunctionFlags::HasJitEntryFlags(isConstructing);
+void MacroAssembler::branchIfFunctionHasNoJitEntry(Register fun, Label* label) {
+ uint16_t flags = FunctionFlags::HasJitEntryFlags();
branchTestFunctionFlags(fun, flags, Assembler::Zero, label);
}
-void MacroAssembler::branchIfFunctionHasJitEntry(Register fun,
- bool isConstructing,
- Label* label) {
- uint16_t flags = FunctionFlags::HasJitEntryFlags(isConstructing);
+void MacroAssembler::branchIfFunctionHasJitEntry(Register fun, Label* label) {
+ uint16_t flags = FunctionFlags::HasJitEntryFlags();
branchTestFunctionFlags(fun, flags, Assembler::NonZero, label);
}
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 9fc4b96830..a5d2073fb9 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -1310,7 +1310,7 @@ void MacroAssembler::loadStringChars(Register str, Register dest,
MOZ_ASSERT(encoding == CharEncoding::TwoByte);
static constexpr uint32_t Mask =
JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
- static_assert(Mask < 1024,
+ static_assert(Mask < 2048,
"Mask should be a small, near-null value to ensure we "
"block speculative execution when it's used as string "
"pointer");
@@ -1344,7 +1344,7 @@ void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
static constexpr uint32_t Mask = JSString::LINEAR_BIT |
JSString::INLINE_CHARS_BIT |
JSString::LATIN1_CHARS_BIT;
- static_assert(Mask < 1024,
+ static_assert(Mask < 2048,
"Mask should be a small, near-null value to ensure we "
"block speculative execution when it's used as string "
"pointer");
@@ -2659,11 +2659,15 @@ void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest);
}
-void MacroAssembler::lookupStringInAtomCacheLastLookups(Register str,
- Register scratch,
- Register output,
- Label* fail) {
- Label found;
+void MacroAssembler::tryFastAtomize(Register str, Register scratch,
+ Register output, Label* fail) {
+ Label found, done, notAtomRef;
+
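+  // Atom refs point directly at their atom; use it and skip the cache lookup.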
+ branchTest32(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
+ Imm32(JSString::ATOM_REF_BIT), &notAtomRef);
+ loadPtr(Address(str, JSAtomRefString::offsetOfAtom()), output);
+ jump(&done);
+ bind(&notAtomRef);
uintptr_t cachePtr = uintptr_t(runtime()->addressOfStringToAtomCache());
void* offset = (void*)(cachePtr + StringToAtomCache::offsetOfLastLookups());
@@ -2682,6 +2686,7 @@ void MacroAssembler::lookupStringInAtomCacheLastLookups(Register str,
bind(&found);
size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
loadPtr(Address(scratch, atomOffset), output);
+ bind(&done);
}
void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
@@ -2741,7 +2746,7 @@ void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
loadAtomHash(outId, outHash, &done);
bind(&nonAtom);
- lookupStringInAtomCacheLastLookups(outId, outHash, outId, cacheMiss);
+ tryFastAtomize(outId, outHash, outId, cacheMiss);
jump(&atom);
bind(&done);
@@ -3382,7 +3387,7 @@ void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
Register scratch,
const LiveRegisterSet& volatileRegs,
Label* fail) {
- Label done;
+ Label done, notCachedAtom;
branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);
// The pointers are not equal, so if the input string is also an atom it
@@ -3390,6 +3395,12 @@ void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), fail);
+  // Try a cheap atomize on the string and repeat the pointer test above.
+ tryFastAtomize(str, scratch, scratch, &notCachedAtom);
+ branchPtr(Assembler::Equal, scratch, ImmGCPtr(atom), &done);
+ jump(fail);
+ bind(&notCachedAtom);
+
// Check the length.
branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
Imm32(atom->length()), fail);
@@ -6551,6 +6562,14 @@ void MacroAssembler::branchWasmRefIsSubtypeExn(Register ref,
branchTestPtr(Assembler::Zero, ref, ref, nullLabel);
}
+  // The only value that can inhabit 'noexn' is null, so fail if the ref is
+  // not null.
+ if (destType.isNoExn()) {
+ jump(failLabel);
+ bind(&fallthrough);
+ return;
+ }
+
// There are no other possible types except exnref, so succeed!
jump(successLabel);
bind(&fallthrough);
@@ -7921,10 +7940,15 @@ void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
branchPtr(Assembler::Below, output, ImmPtr(classForType(Scalar::BigInt64)),
&one);
+ static_assert(ValidateSizeRange(Scalar::BigInt64, Scalar::Float16),
+ "element size is eight in [BigInt64, Float16)");
+ branchPtr(Assembler::Below, output, ImmPtr(classForType(Scalar::Float16)),
+ &eight);
+
static_assert(
- ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
- "element size is eight in [BigInt64, MaxTypedArrayViewType)");
- // Fall through for BigInt64 and BigUint64
+ ValidateSizeRange(Scalar::Float16, Scalar::MaxTypedArrayViewType),
+ "element size is two in [Float16, MaxTypedArrayViewType)");
+ jump(&two);
bind(&eight);
move32(Imm32(8), output);
@@ -7993,10 +8017,16 @@ void MacroAssembler::resizableTypedArrayElementShiftBy(Register obj,
branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::BigInt64)),
&zero);
+ static_assert(ValidateSizeRange(Scalar::BigInt64, Scalar::Float16),
+ "element shift is three in [BigInt64, Float16)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::Float16)),
+ &three);
+
static_assert(
- ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
- "element shift is three in [BigInt64, MaxTypedArrayViewType)");
- // Fall through for BigInt64 and BigUint64
+ ValidateSizeRange(Scalar::Float16, Scalar::MaxTypedArrayViewType),
+ "element shift is one in [Float16, MaxTypedArrayViewType)");
+  jump(&one);
bind(&three);
rshiftPtr(Imm32(3), output);
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 114aaa47d7..b4ad95b86b 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1727,10 +1727,8 @@ class MacroAssembler : public MacroAssemblerSpecific {
Register scratch,
Label* label);
- inline void branchIfFunctionHasNoJitEntry(Register fun, bool isConstructing,
- Label* label);
- inline void branchIfFunctionHasJitEntry(Register fun, bool isConstructing,
- Label* label);
+ inline void branchIfFunctionHasNoJitEntry(Register fun, Label* label);
+ inline void branchIfFunctionHasJitEntry(Register fun, Label* label);
inline void branchIfScriptHasJitScript(Register script, Label* label);
inline void branchIfScriptHasNoJitScript(Register script, Label* label);
@@ -5602,8 +5600,8 @@ class MacroAssembler : public MacroAssemblerSpecific {
void setIsDefinitelyTypedArrayConstructor(Register obj, Register output);
void loadMegamorphicCache(Register dest);
- void lookupStringInAtomCacheLastLookups(Register str, Register scratch,
- Register output, Label* fail);
+ void tryFastAtomize(Register str, Register scratch, Register output,
+ Label* fail);
void loadMegamorphicSetPropCache(Register dest);
void loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
diff --git a/js/src/jit/RangeAnalysis.cpp b/js/src/jit/RangeAnalysis.cpp
index bd8380a690..3a7abd8f3e 100644
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -23,6 +23,7 @@
#include "util/CheckedArithmetic.h"
#include "util/Unicode.h"
#include "vm/ArgumentsObject.h"
+#include "vm/Float16.h"
#include "vm/TypedArrayObject.h"
#include "vm/Uint8Clamped.h"
@@ -1758,6 +1759,7 @@ static Range* GetArrayBufferViewRange(TempAllocator& alloc, Scalar::Type type) {
case Scalar::BigUint64:
case Scalar::Int64:
case Scalar::Simd128:
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
case Scalar::MaxTypedArrayViewType:
diff --git a/js/src/jit/TrampolineNatives.cpp b/js/src/jit/TrampolineNatives.cpp
index e22023f8dd..a0d7c979c1 100644
--- a/js/src/jit/TrampolineNatives.cpp
+++ b/js/src/jit/TrampolineNatives.cpp
@@ -14,6 +14,7 @@
#include "jit/PerfSpewer.h"
#include "js/CallArgs.h"
#include "js/experimental/JitInfo.h"
+#include "vm/TypedArrayObject.h"
#include "jit/MacroAssembler-inl.h"
#include "vm/Activation-inl.h"
@@ -42,7 +43,8 @@ void js::jit::SetTrampolineNativeJitEntry(JSContext* cx, JSFunction* fun,
fun->setTrampolineNativeJitEntry(entry);
}
-uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
+uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm,
+ ArraySortKind kind) {
AutoCreatedBy acb(masm, "JitRuntime::generateArraySortTrampoline");
const uint32_t offset = startTrampolineCode(masm);
@@ -108,11 +110,12 @@ uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
// Trampoline control flow looks like this:
//
- // call ArraySortFromJit
+ // call ArraySortFromJit or TypedArraySortFromJit
// goto checkReturnValue
// call_comparator:
// call comparator
- // call ArraySortData::sortWithComparator
+ // call ArraySortData::sortArrayWithComparator or
+ // ArraySortData::sortTypedArrayWithComparator
// checkReturnValue:
// check return value, jump to call_comparator if needed
// return rval
@@ -125,7 +128,7 @@ uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
masm.enterFakeExitFrame(cxReg, scratchReg, ExitFrameType::Bare);
};
- // Call ArraySortFromJit.
+ // Call {Typed}ArraySortFromJit.
using Fn1 = ArraySortResult (*)(JSContext* cx,
jit::TrampolineNativeFrameLayout* frame);
masm.loadJSContext(temp0);
@@ -133,8 +136,16 @@ uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
masm.setupAlignedABICall();
masm.passABIArg(temp0);
masm.passABIArg(FramePointer);
- masm.callWithABI<Fn1, ArraySortFromJit>(
- ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ switch (kind) {
+ case ArraySortKind::Array:
+ masm.callWithABI<Fn1, ArraySortFromJit>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ break;
+ case ArraySortKind::TypedArray:
+ masm.callWithABI<Fn1, TypedArraySortFromJit>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ break;
+ }
// Check return value.
Label checkReturnValue;
@@ -192,15 +203,23 @@ uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
masm.bind(&callDone);
masm.storeValue(JSReturnOperand, Address(FramePointer, RvalOffset));
- // Call ArraySortData::sortWithComparator.
+ // Call ArraySortData::sort{Typed}ArrayWithComparator.
using Fn2 = ArraySortResult (*)(ArraySortData* data);
masm.moveStackPtrTo(temp2);
masm.loadJSContext(temp0);
pushExitFrame(temp0, temp1);
masm.setupAlignedABICall();
masm.passABIArg(temp2);
- masm.callWithABI<Fn2, ArraySortData::sortWithComparator>(
- ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ switch (kind) {
+ case ArraySortKind::Array:
+ masm.callWithABI<Fn2, ArraySortData::sortArrayWithComparator>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ break;
+ case ArraySortKind::TypedArray:
+ masm.callWithABI<Fn2, ArraySortData::sortTypedArrayWithComparator>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+ break;
+ }
// Check return value.
masm.bind(&checkReturnValue);
@@ -231,8 +250,13 @@ uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
void JitRuntime::generateTrampolineNatives(
MacroAssembler& masm, TrampolineNativeJitEntryOffsets& offsets,
PerfSpewerRangeRecorder& rangeRecorder) {
- offsets[TrampolineNative::ArraySort] = generateArraySortTrampoline(masm);
+ offsets[TrampolineNative::ArraySort] =
+ generateArraySortTrampoline(masm, ArraySortKind::Array);
rangeRecorder.recordOffset("Trampoline: ArraySort");
+
+ offsets[TrampolineNative::TypedArraySort] =
+ generateArraySortTrampoline(masm, ArraySortKind::TypedArray);
+ rangeRecorder.recordOffset("Trampoline: TypedArraySort");
}
bool jit::CallTrampolineNativeJitCode(JSContext* cx, TrampolineNative native,
diff --git a/js/src/jit/TrampolineNatives.h b/js/src/jit/TrampolineNatives.h
index f71a3b707d..6151a78fda 100644
--- a/js/src/jit/TrampolineNatives.h
+++ b/js/src/jit/TrampolineNatives.h
@@ -32,7 +32,9 @@ class CallArgs;
} // namespace JS
// List of all trampoline natives.
-#define TRAMPOLINE_NATIVE_LIST(_) _(ArraySort)
+#define TRAMPOLINE_NATIVE_LIST(_) \
+ _(ArraySort) \
+ _(TypedArraySort)
namespace js {
namespace jit {
diff --git a/js/src/jit/TypePolicy.cpp b/js/src/jit/TypePolicy.cpp
index bf3cb68eea..7d677bfb06 100644
--- a/js/src/jit/TypePolicy.cpp
+++ b/js/src/jit/TypePolicy.cpp
@@ -183,124 +183,99 @@ bool AllDoublePolicy::staticAdjustInputs(TempAllocator& alloc,
bool ComparePolicy::adjustInputs(TempAllocator& alloc,
MInstruction* def) const {
- MOZ_ASSERT(def->isCompare());
- MCompare* compare = def->toCompare();
-
- // Convert Float32 operands to doubles
- for (size_t i = 0; i < 2; i++) {
- MDefinition* in = def->getOperand(i);
- if (in->type() == MIRType::Float32) {
- MInstruction* replace = MToDouble::New(alloc, in);
- def->block()->insertBefore(def, replace);
- def->replaceOperand(i, replace);
- }
- }
-
- auto replaceOperand = [&](size_t index, auto* replace) {
- def->block()->insertBefore(def, replace);
- def->replaceOperand(index, replace);
- return replace->typePolicy()->adjustInputs(alloc, replace);
- };
-
- if (compare->compareType() == MCompare::Compare_Undefined ||
- compare->compareType() == MCompare::Compare_Null) {
- // Nothing to do for undefined and null, lowering handles all types.
- return true;
- }
-
- if (compare->compareType() == MCompare::Compare_UIntPtr) {
- MOZ_ASSERT(compare->lhs()->type() == MIRType::IntPtr);
- MOZ_ASSERT(compare->rhs()->type() == MIRType::IntPtr);
- return true;
- }
-
- // Compare_BigInt_Int32 specialization is done for "BigInt <cmp> Int32".
- // Compare_BigInt_Double specialization is done for "BigInt <cmp> Double".
- // Compare_BigInt_String specialization is done for "BigInt <cmp> String".
- if (compare->compareType() == MCompare::Compare_BigInt_Int32 ||
- compare->compareType() == MCompare::Compare_BigInt_Double ||
- compare->compareType() == MCompare::Compare_BigInt_String) {
- if (MDefinition* in = def->getOperand(0); in->type() != MIRType::BigInt) {
- auto* replace =
- MUnbox::New(alloc, in, MIRType::BigInt, MUnbox::Infallible);
- if (!replaceOperand(0, replace)) {
- return false;
- }
+ auto convertOperand = [&](size_t index, MIRType expected) {
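+    // Convert or unbox the operand at `index` to the `expected` type; the
+    // inserted instruction bails out with BailoutKind::TypePolicy on failure.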
+ MDefinition* operand = def->getOperand(index);
+ if (operand->type() == expected) {
+ return true;
}
-
- MDefinition* in = def->getOperand(1);
-
MInstruction* replace = nullptr;
- if (compare->compareType() == MCompare::Compare_BigInt_Int32) {
- if (in->type() != MIRType::Int32) {
- replace = MToNumberInt32::New(
- alloc, in, IntConversionInputKind::NumbersOrBoolsOnly);
- }
- } else if (compare->compareType() == MCompare::Compare_BigInt_Double) {
- if (in->type() != MIRType::Double) {
- replace = MToDouble::New(alloc, in, MToFPInstruction::NumbersOnly);
- }
- } else {
- MOZ_ASSERT(compare->compareType() == MCompare::Compare_BigInt_String);
- if (in->type() != MIRType::String) {
- replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Infallible);
- }
- }
-
- if (replace) {
- if (!replaceOperand(1, replace)) {
- return false;
- }
- }
-
- return true;
- }
-
- // Convert all inputs to the right input type
- MIRType type = compare->inputType();
- MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Double ||
- type == MIRType::Float32 || type == MIRType::Object ||
- type == MIRType::String || type == MIRType::Symbol ||
- type == MIRType::BigInt);
- for (size_t i = 0; i < 2; i++) {
- MDefinition* in = def->getOperand(i);
- if (in->type() == type) {
- continue;
- }
-
- MInstruction* replace;
-
- switch (type) {
+ switch (expected) {
case MIRType::Double:
- replace = MToDouble::New(alloc, in, MToFPInstruction::NumbersOnly);
- break;
- case MIRType::Float32:
- replace = MToFloat32::New(alloc, in, MToFPInstruction::NumbersOnly);
+ replace = MToDouble::New(alloc, operand);
break;
- case MIRType::Int32: {
- IntConversionInputKind convert = IntConversionInputKind::NumbersOnly;
- replace = MToNumberInt32::New(alloc, in, convert);
+ case MIRType::Int32:
+ replace = MToNumberInt32::New(alloc, operand);
break;
- }
- case MIRType::Object:
- replace = MUnbox::New(alloc, in, MIRType::Object, MUnbox::Infallible);
+ case MIRType::Float32:
+ replace = MToFloat32::New(alloc, operand);
break;
case MIRType::String:
- replace = MUnbox::New(alloc, in, MIRType::String, MUnbox::Infallible);
+ replace =
+ MUnbox::New(alloc, operand, MIRType::String, MUnbox::Fallible);
break;
case MIRType::Symbol:
- replace = MUnbox::New(alloc, in, MIRType::Symbol, MUnbox::Infallible);
+ replace =
+ MUnbox::New(alloc, operand, MIRType::Symbol, MUnbox::Fallible);
+ break;
+ case MIRType::Object:
+ replace =
+ MUnbox::New(alloc, operand, MIRType::Object, MUnbox::Fallible);
break;
case MIRType::BigInt:
- replace = MUnbox::New(alloc, in, MIRType::BigInt, MUnbox::Infallible);
+ replace =
+ MUnbox::New(alloc, operand, MIRType::BigInt, MUnbox::Fallible);
break;
default:
- MOZ_CRASH("Unknown compare specialization");
+ MOZ_CRASH("Unsupported MIRType");
}
+ replace->setBailoutKind(BailoutKind::TypePolicy);
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(index, replace);
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+ };
- if (!replaceOperand(i, replace)) {
- return false;
- }
+ MOZ_ASSERT(def->isCompare());
+ MCompare* compare = def->toCompare();
+ switch (compare->compareType()) {
+ case MCompare::Compare_Undefined:
+ case MCompare::Compare_Null:
+ MOZ_ASSERT(compare->rhs()->type() == MIRType::Undefined ||
+ compare->rhs()->type() == MIRType::Null);
+      // If the operand is float32, we must convert it to a double.
+ if (compare->lhs()->type() == MIRType::Float32) {
+ MInstruction* replace = MToDouble::New(alloc, compare->lhs());
+ def->block()->insertBefore(def, replace);
+ def->replaceOperand(0, replace);
+ return replace->typePolicy()->adjustInputs(alloc, replace);
+ }
+ // GVN and lowering handle all other types.
+ return true;
+ case MCompare::Compare_Int32:
+ return convertOperand(0, MIRType::Int32) &&
+ convertOperand(1, MIRType::Int32);
+ case MCompare::Compare_UIntPtr:
+ MOZ_ASSERT(compare->lhs()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(compare->rhs()->type() == MIRType::IntPtr);
+ return true;
+ case MCompare::Compare_Double:
+ return convertOperand(0, MIRType::Double) &&
+ convertOperand(1, MIRType::Double);
+ case MCompare::Compare_Float32:
+ return convertOperand(0, MIRType::Float32) &&
+ convertOperand(1, MIRType::Float32);
+ case MCompare::Compare_String:
+ return convertOperand(0, MIRType::String) &&
+ convertOperand(1, MIRType::String);
+ case MCompare::Compare_Symbol:
+ return convertOperand(0, MIRType::Symbol) &&
+ convertOperand(1, MIRType::Symbol);
+ case MCompare::Compare_Object:
+ return convertOperand(0, MIRType::Object) &&
+ convertOperand(1, MIRType::Object);
+ case MCompare::Compare_BigInt:
+ return convertOperand(0, MIRType::BigInt) &&
+ convertOperand(1, MIRType::BigInt);
+ case MCompare::Compare_BigInt_Int32:
+ return convertOperand(0, MIRType::BigInt) &&
+ convertOperand(1, MIRType::Int32);
+ case MCompare::Compare_BigInt_Double:
+ return convertOperand(0, MIRType::BigInt) &&
+ convertOperand(1, MIRType::Double);
+ case MCompare::Compare_BigInt_String:
+ return convertOperand(0, MIRType::BigInt) &&
+ convertOperand(1, MIRType::String);
+ default:
+ MOZ_CRASH("Unexpected compare type");
}
return true;
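
The net effect of this rewrite: every compare specialization now flows through one convertOperand helper and a single switch on the compare type, and the inserted unboxes become Fallible with BailoutKind::TypePolicy rather than Infallible. A stand-alone sketch of the dispatch shape, with simplified stand-ins for the MIR types:

#include <cstdint>

enum class MIRType : uint8_t { Int32, Double, BigInt, String };
enum class CompareType : uint8_t { Int32, Double, BigInt_Int32 };

struct Operand { MIRType type; };

// Stand-in for the convertOperand lambda: a no-op when the type already
// matches, otherwise "insert" a conversion (MToDouble, MUnbox, ...) that
// bails out with BailoutKind::TypePolicy when the value doesn't fit.
static bool convertOperand(Operand& op, MIRType expected) {
  if (op.type == expected) {
    return true;
  }
  op.type = expected;  // models inserting the conversion node
  return true;
}

static bool adjustInputs(CompareType kind, Operand& lhs, Operand& rhs) {
  switch (kind) {
    case CompareType::Int32:
      return convertOperand(lhs, MIRType::Int32) &&
             convertOperand(rhs, MIRType::Int32);
    case CompareType::BigInt_Int32:  // mixed-type specialization
      return convertOperand(lhs, MIRType::BigInt) &&
             convertOperand(rhs, MIRType::Int32);
    case CompareType::Double:
      return convertOperand(lhs, MIRType::Double) &&
             convertOperand(rhs, MIRType::Double);
  }
  return false;
}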
diff --git a/js/src/jit/VMFunctionList-inl.h b/js/src/jit/VMFunctionList-inl.h
index 99b98f17ed..aae6b866e2 100644
--- a/js/src/jit/VMFunctionList-inl.h
+++ b/js/src/jit/VMFunctionList-inl.h
@@ -173,6 +173,7 @@ namespace jit {
_(DoToBoolFallback, js::jit::DoToBoolFallback) \
_(DoToPropertyKeyFallback, js::jit::DoToPropertyKeyFallback) \
_(DoTrialInlining, js::jit::DoTrialInlining) \
+ _(DoTypeOfEqFallback, js::jit::DoTypeOfEqFallback) \
_(DoTypeOfFallback, js::jit::DoTypeOfFallback) \
_(DoUnaryArithFallback, js::jit::DoUnaryArithFallback, 1) \
_(EnterWith, js::jit::EnterWith) \
@@ -211,7 +212,6 @@ namespace jit {
_(InterpretResume, js::jit::InterpretResume) \
_(InterruptCheck, js::jit::InterruptCheck) \
_(InvokeFunction, js::jit::InvokeFunction) \
- _(InvokeNativeFunction, js::jit::InvokeNativeFunction) \
_(IonBinaryArithICUpdate, js::jit::IonBinaryArithIC::update) \
_(IonBindNameICUpdate, js::jit::IonBindNameIC::update) \
_(IonCheckPrivateFieldICUpdate, js::jit::IonCheckPrivateFieldIC::update) \
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
index 3ec85a72c2..7d729351b7 100644
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -38,6 +38,7 @@
#include "vm/SelfHosting.h"
#include "vm/StaticStrings.h"
#include "vm/TypedArrayObject.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "vm/Watchtower.h"
#include "wasm/WasmGcObject.h"
@@ -545,39 +546,6 @@ bool InvokeFunction(JSContext* cx, HandleObject obj, bool constructing,
return Call(cx, fval, thisv, args, rval);
}
-bool InvokeNativeFunction(JSContext* cx, bool constructing,
- bool ignoresReturnValue, uint32_t argc, Value* argv,
- MutableHandleValue rval) {
- // Ensure argv array is rooted - we may GC in here.
- size_t numValues = argc + 2 + constructing;
- RootedExternalValueArray argvRoot(cx, numValues, argv);
-
- // Data in the argument vector is arranged for a JIT -> C++ call.
- CallArgs callArgs = CallArgsFromSp(argc + constructing, argv + numValues,
- constructing, ignoresReturnValue);
-
- // This function is only called when the callee is a native function.
- MOZ_ASSERT(callArgs.callee().as<JSFunction>().isNativeWithoutJitEntry());
-
- if (constructing) {
- MOZ_ASSERT(callArgs.thisv().isMagic(JS_IS_CONSTRUCTING));
-
- if (!ConstructFromStack(cx, callArgs)) {
- return false;
- }
-
- MOZ_ASSERT(callArgs.rval().isObject(),
- "native constructors don't return primitives");
- } else {
- if (!CallFromStack(cx, callArgs)) {
- return false;
- }
- }
-
- rval.set(callArgs.rval());
- return true;
-}
-
void* GetContextSensitiveInterpreterStub() {
return TlsContext.get()->runtime()->jitRuntime()->interpreterStub().value;
}
@@ -2280,6 +2248,15 @@ JSString* TypeOfNameObject(JSObject* obj, JSRuntime* rt) {
return TypeName(type, *rt->commonNames);
}
+bool TypeOfEqObject(JSObject* obj, TypeofEqOperand operand) {
+ AutoUnsafeCallWithABI unsafe;
+ bool result = js::TypeOfObject(obj) == operand.type();
+ if (operand.compareOp() == JSOp::Ne) {
+ result = !result;
+ }
+ return result;
+}
+
bool GetPrototypeOf(JSContext* cx, HandleObject target,
MutableHandleValue rval) {
MOZ_ASSERT(target->hasDynamicPrototype());
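
TypeOfEqObject lets jitted code evaluate `typeof obj <op> <type>` in a single unsafe ABI call. A sketch of the operand it consumes, assuming a one-byte packing of the JSType plus a negation bit; the authoritative encoding lives in vm/TypeofEqOperand.h:

#include <cstdint>

enum class JSType : uint8_t { Undefined, Object, Function, String, Number,
                              Boolean, Symbol, BigInt };
enum class JSOp : uint8_t { Eq, Ne };

// Hypothetical packed operand: low bits hold the JSType, the top bit
// selects != instead of ==.
class TypeofEqOperand {
  static constexpr uint8_t NeBit = 0x80;
  uint8_t raw_;

 public:
  TypeofEqOperand(JSType type, JSOp op)
      : raw_(uint8_t(type) | (op == JSOp::Ne ? NeBit : 0)) {}
  JSType type() const { return JSType(raw_ & ~NeBit); }
  JSOp compareOp() const { return (raw_ & NeBit) ? JSOp::Ne : JSOp::Eq; }
};

// Mirrors the body of TypeOfEqObject above: compare, then flip for JSOp::Ne.
bool typeOfEq(JSType actual, TypeofEqOperand operand) {
  bool result = actual == operand.type();
  if (operand.compareOp() == JSOp::Ne) {
    result = !result;
  }
  return result;
}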
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
index b5ac5d700b..e769fd306a 100644
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -19,6 +19,7 @@
#include "gc/AllocKind.h"
#include "js/ScalarType.h"
#include "js/TypeDecls.h"
+#include "vm/TypeofEqOperand.h"
class JSJitInfo;
class JSLinearString;
@@ -354,10 +355,6 @@ struct LastArg<HeadType, TailTypes...> {
uint32_t argc, Value* argv,
MutableHandleValue rval);
-[[nodiscard]] bool InvokeNativeFunction(JSContext* cx, bool constructing,
- bool ignoresReturnValue, uint32_t argc,
- Value* argv, MutableHandleValue rval);
-
bool InvokeFromInterpreterStub(JSContext* cx,
InterpreterStubExitFrameLayout* frame);
void* GetContextSensitiveInterpreterStub();
@@ -585,6 +582,8 @@ bool SetPropertyMegamorphic(JSContext* cx, HandleObject obj, HandleId id,
JSString* TypeOfNameObject(JSObject* obj, JSRuntime* rt);
+bool TypeOfEqObject(JSObject* obj, TypeofEqOperand operand);
+
bool GetPrototypeOf(JSContext* cx, HandleObject target,
MutableHandleValue rval);
diff --git a/js/src/jit/WarpBuilder.cpp b/js/src/jit/WarpBuilder.cpp
index cad28fa535..4ca882c4b1 100644
--- a/js/src/jit/WarpBuilder.cpp
+++ b/js/src/jit/WarpBuilder.cpp
@@ -21,6 +21,7 @@
#include "vm/GeneratorObject.h"
#include "vm/Interpreter.h"
#include "vm/Opcodes.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "gc/ObjectKind-inl.h"
#include "vm/BytecodeIterator-inl.h"
@@ -1564,6 +1565,30 @@ bool WarpBuilder::build_TypeofExpr(BytecodeLocation loc) {
return build_Typeof(loc);
}
+bool WarpBuilder::build_TypeofEq(BytecodeLocation loc) {
+ auto operand = loc.getTypeofEqOperand();
+ JSType type = operand.type();
+ JSOp compareOp = operand.compareOp();
+ MDefinition* input = current->pop();
+
+ if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
+ auto* typeOf = MTypeOf::New(alloc(), input);
+ typeOf->setObservedTypes(typesSnapshot->list());
+ current->add(typeOf);
+
+ auto* typeInt = MConstant::New(alloc(), Int32Value(type));
+ current->add(typeInt);
+
+ auto* ins = MCompare::New(alloc(), typeOf, typeInt, compareOp,
+ MCompare::Compare_Int32);
+ current->add(ins);
+ current->push(ins);
+ return true;
+ }
+
+ return buildIC(loc, CacheKind::TypeOfEq, {input});
+}
+
bool WarpBuilder::build_Arguments(BytecodeLocation loc) {
auto* snapshot = getOpSnapshot<WarpArguments>(loc);
MOZ_ASSERT(info().needsArgsObj());
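
The payoff of build_TypeofEq is that `typeof x === "string"` never materializes or compares a string: MTypeOf yields a small integer tag and the MCompare runs on Int32. A scalar model of the emitted node chain (tag values illustrative; the real ones come from JSType):

#include <cstdio>

enum Tag : int32_t { TagObject, TagFunction, TagString, TagNumber };

// MTypeOf classifies the input as an Int32 tag; MConstant bakes in the
// expected tag from the bytecode operand; MCompare(Compare_Int32) does one
// integer comparison, with JSOp::Ne flipping the result.
static bool typeofEq(Tag input, Tag expected, bool negate) {
  bool eq = (input == expected);
  return negate ? !eq : eq;
}

int main() {
  // typeof x === "string" where x is a string value:
  std::printf("%d\n", typeofEq(TagString, TagString, /*negate=*/false));
  return 0;
}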
@@ -3402,6 +3427,23 @@ bool WarpBuilder::buildIC(BytecodeLocation loc, CacheKind kind,
current->push(ins);
return true;
}
+ case CacheKind::TypeOfEq: {
+ MOZ_ASSERT(numInputs == 1);
+ auto operand = loc.getTypeofEqOperand();
+ JSType type = operand.type();
+ JSOp compareOp = operand.compareOp();
+ auto* typeOf = MTypeOf::New(alloc(), getInput(0));
+ current->add(typeOf);
+
+ auto* typeInt = MConstant::New(alloc(), Int32Value(type));
+ current->add(typeInt);
+
+ auto* ins = MCompare::New(alloc(), typeOf, typeInt, compareOp,
+ MCompare::Compare_Int32);
+ current->add(ins);
+ current->push(ins);
+ return true;
+ }
case CacheKind::NewObject: {
auto* templateConst = constant(NullValue());
MNewObject* ins = MNewObject::NewVM(
@@ -3482,6 +3524,7 @@ bool WarpBuilder::buildBailoutForColdIC(BytecodeLocation loc, CacheKind kind) {
case CacheKind::CheckPrivateField:
case CacheKind::InstanceOf:
case CacheKind::OptimizeGetIterator:
+ case CacheKind::TypeOfEq:
resultType = MIRType::Boolean;
break;
case CacheKind::SetProp:
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
index fdaafd00b3..2e4a4b9613 100644
--- a/js/src/jit/WarpCacheIRTranspiler.cpp
+++ b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -25,6 +25,7 @@
#include "jit/WarpSnapshot.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "vm/BytecodeLocation.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "wasm/WasmCode.h"
#include "gc/ObjectKind-inl.h"
@@ -1117,10 +1118,9 @@ bool WarpCacheIRTranspiler::emitGuardNoDenseElements(ObjOperandId objId) {
return true;
}
-bool WarpCacheIRTranspiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
- bool constructing) {
+bool WarpCacheIRTranspiler::emitGuardFunctionHasJitEntry(ObjOperandId funId) {
MDefinition* fun = getOperand(funId);
- uint16_t expectedFlags = FunctionFlags::HasJitEntryFlags(constructing);
+ uint16_t expectedFlags = FunctionFlags::HasJitEntryFlags();
uint16_t unexpectedFlags = 0;
auto* ins =
@@ -1134,8 +1134,7 @@ bool WarpCacheIRTranspiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
bool WarpCacheIRTranspiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
MDefinition* fun = getOperand(funId);
uint16_t expectedFlags = 0;
- uint16_t unexpectedFlags =
- FunctionFlags::HasJitEntryFlags(/*isConstructing=*/false);
+ uint16_t unexpectedFlags = FunctionFlags::HasJitEntryFlags();
auto* ins =
MGuardFunctionFlags::New(alloc(), fun, expectedFlags, unexpectedFlags);
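
Both guards now share a single jit-entry mask: HasJitEntryFlags() no longer takes a constructing flag, so "has a jit entry" and "has no jit entry" test the same bits from opposite sides. A sketch of the predicate MGuardFunctionFlags is assumed to implement:

#include <cstdint>

// Assumed semantics: the guard passes only when every expected flag is set
// and no unexpected flag is set on the function.
inline bool guardFunctionFlags(uint16_t flags, uint16_t expectedFlags,
                               uint16_t unexpectedFlags) {
  return (flags & expectedFlags) == expectedFlags &&
         (flags & unexpectedFlags) == 0;
}

Under this model the two call sites above differ only in which side of the mask they populate.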
@@ -1626,6 +1625,22 @@ bool WarpCacheIRTranspiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
return true;
}
+bool WarpCacheIRTranspiler::emitLoadTypeOfEqObjectResult(
+ ObjOperandId objId, TypeofEqOperand operand) {
+ MDefinition* obj = getOperand(objId);
+ auto* typeOf = MTypeOf::New(alloc(), obj);
+ add(typeOf);
+
+ auto* typeInt = MConstant::New(alloc(), Int32Value(operand.type()));
+ add(typeInt);
+
+ auto* ins = MCompare::New(alloc(), typeOf, typeInt, operand.compareOp(),
+ MCompare::Compare_Int32);
+ add(ins);
+ pushResult(ins);
+ return true;
+}
+
bool WarpCacheIRTranspiler::emitLoadEnclosingEnvironment(
ObjOperandId objId, ObjOperandId resultId) {
MDefinition* env = getOperand(objId);
diff --git a/js/src/jit/WarpOracle.cpp b/js/src/jit/WarpOracle.cpp
index 624ebfcedf..d0c6d30d8c 100644
--- a/js/src/jit/WarpOracle.cpp
+++ b/js/src/jit/WarpOracle.cpp
@@ -510,6 +510,11 @@ AbortReasonOr<WarpScriptSnapshot*> WarpScriptOracle::createScriptSnapshot() {
break;
}
+ case JSOp::String:
+ if (!loc.atomizeString(cx_, script_)) {
+ return abort(AbortReason::Alloc);
+ }
+ break;
case JSOp::GetName:
case JSOp::GetGName:
case JSOp::GetProp:
@@ -578,6 +583,7 @@ AbortReasonOr<WarpScriptSnapshot*> WarpScriptOracle::createScriptSnapshot() {
case JSOp::OptimizeSpreadCall:
case JSOp::Typeof:
case JSOp::TypeofExpr:
+ case JSOp::TypeofEq:
case JSOp::NewObject:
case JSOp::NewInit:
case JSOp::NewArray:
@@ -613,7 +619,6 @@ AbortReasonOr<WarpScriptSnapshot*> WarpScriptOracle::createScriptSnapshot() {
case JSOp::Int32:
case JSOp::Double:
case JSOp::BigInt:
- case JSOp::String:
case JSOp::Symbol:
case JSOp::Pop:
case JSOp::PopN:
@@ -1209,6 +1214,10 @@ bool WarpScriptOracle::replaceNurseryAndAllocSitePointers(
// If the stub data contains weak pointers then trigger a read barrier. This
// is necessary as these will now be strong references in the snapshot.
//
+ // If the stub data contains strings then atomize them. This ensures we don't
+ // try to access potentially unstable characters from a background thread and
+ // also facilitates certain optimizations.
+ //
// Also asserts non-object fields don't contain nursery pointers.
uint32_t field = 0;
@@ -1270,11 +1279,17 @@ bool WarpScriptOracle::replaceNurseryAndAllocSitePointers(
break;
}
case StubField::Type::String: {
-#ifdef DEBUG
- JSString* str =
- stubInfo->getStubField<StubField::Type::String>(stub, offset);
+ uintptr_t oldWord = stubInfo->getStubRawWord(stub, offset);
+ JSString* str = reinterpret_cast<JSString*>(oldWord);
MOZ_ASSERT(!IsInsideNursery(str));
-#endif
+ JSAtom* atom = AtomizeString(cx_, str);
+ if (!atom) {
+ return false;
+ }
+ if (atom != str) {
+ uintptr_t newWord = reinterpret_cast<uintptr_t>(atom);
+ stubInfo->replaceStubRawWord(stubDataCopy, offset, oldWord, newWord);
+ }
break;
}
case StubField::Type::Id: {
@@ -1287,10 +1302,19 @@ bool WarpScriptOracle::replaceNurseryAndAllocSitePointers(
break;
}
case StubField::Type::Value: {
-#ifdef DEBUG
- Value v = stubInfo->getStubField<StubField::Type::Value>(stub, offset);
+ Value v =
+ stubInfo->getStubField<StubField::Type::Value>(stub, offset).get();
MOZ_ASSERT_IF(v.isGCThing(), !IsInsideNursery(v.toGCThing()));
-#endif
+ if (v.isString()) {
+ Value newVal;
+ JSAtom* atom = AtomizeString(cx_, v.toString());
+ if (!atom) {
+ return false;
+ }
+ newVal.setString(atom);
+ stubInfo->replaceStubRawValueBits(stubDataCopy, offset, v.asRawBits(),
+ newVal.asRawBits());
+ }
break;
}
case StubField::Type::AllocSite: {
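
The same atomize-and-patch idiom appears twice above: read the old bits out of the stub, atomize, and write the snapshot's copy only when the pointer actually changed. A stand-alone analogue of the raw-word patch; replaceStubRawWord's real checks live in the CacheIR stub-info code, this only shows the shape:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Patch a pointer-sized word inside a byte copy of the stub data. The old
// bits are passed along so the callee can assert nothing raced the update.
static void replaceRawWord(uint8_t* stubDataCopy, size_t offset,
                           uintptr_t oldWord, uintptr_t newWord) {
  uintptr_t current;
  std::memcpy(&current, stubDataCopy + offset, sizeof(current));
  assert(current == oldWord);
  (void)current;
  (void)oldWord;
  std::memcpy(stubDataCopy + offset, &newWord, sizeof(newWord));
}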
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp
index e3ec2494ff..511df5b4b2 100644
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -569,6 +569,7 @@ void MacroAssemblerCompat::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
case Scalar::Uint8Clamped:
case Scalar::BigInt64:
case Scalar::BigUint64:
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
@@ -672,6 +673,7 @@ void MacroAssemblerCompat::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
case Scalar::Uint8Clamped:
case Scalar::BigInt64:
case Scalar::BigUint64:
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
diff --git a/js/src/jit/loong64/CodeGenerator-loong64.cpp b/js/src/jit/loong64/CodeGenerator-loong64.cpp
index 76d3047680..7f873aaa9b 100644
--- a/js/src/jit/loong64/CodeGenerator-loong64.cpp
+++ b/js/src/jit/loong64/CodeGenerator-loong64.cpp
@@ -2357,9 +2357,15 @@ void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
Register base = ToRegister(ins->base());
Register index = ToRegister(ins->index());
Register output = ToRegister(ins->output());
+ int32_t shift = Imm32::ShiftOf(mir->scale()).value;
- BaseIndex address(base, index, mir->scale(), mir->displacement());
- masm.computeEffectiveAddress(address, output);
+ if (shift) {
+ MOZ_ASSERT(shift <= 4);
+ masm.as_alsl_w(output, index, base, shift - 1);
+ } else {
+ masm.as_add_w(output, base, index);
+ }
+ masm.ma_add_w(output, output, Imm32(mir->displacement()));
}
void CodeGenerator::visitNegI(LNegI* ins) {
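
This replaces the generic computeEffectiveAddress with at most two LoongArch instructions. The `shift - 1` is not an off-by-one: as_alsl_w is assumed to take the raw 2-bit field sa, which the hardware interprets as a left shift of sa + 1. A scalar model of the lowering:

#include <cassert>
#include <cstdint>

static uint32_t effectiveAddress(uint32_t base, uint32_t index, int shift,
                                 int32_t displacement) {
  uint32_t out;
  if (shift) {
    assert(shift >= 1 && shift <= 4);
    out = (index << shift) + base;  // one alsl.w
  } else {
    out = base + index;             // one add.w
  }
  return out + uint32_t(displacement);  // ma_add_w with the displacement
}

// Example: base = 0x1000, index = 3, scale = TimesEight (shift = 3),
// displacement = 4  ->  0x1000 + (3 << 3) + 4 = 0x101c.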
diff --git a/js/src/jit/moz.build b/js/src/jit/moz.build
index c49b4fcd9f..f5de984478 100644
--- a/js/src/jit/moz.build
+++ b/js/src/jit/moz.build
@@ -26,6 +26,7 @@ UNIFIED_SOURCES += [
"BaselineIC.cpp",
"BaselineJIT.cpp",
"BitSet.cpp",
+ "BranchHinting.cpp",
"BytecodeAnalysis.cpp",
"CacheIR.cpp",
"CacheIRCompiler.cpp",
diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.h b/js/src/jit/shared/AtomicOperations-shared-jit.h
index ca66a6f9b9..d5303b2c60 100644
--- a/js/src/jit/shared/AtomicOperations-shared-jit.h
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.h
@@ -23,6 +23,7 @@
#include <stdint.h>
#include "jit/AtomicOperationsGenerated.h"
+#include "vm/Float16.h"
#include "vm/Uint8Clamped.h"
namespace js {
@@ -405,6 +406,14 @@ inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
return uint8_clamped(loadSafeWhenRacy((uint8_t*)addr));
}
+// Clang requires a specialization for float16.
+template <>
+inline float16 js::jit::AtomicOperations::loadSafeWhenRacy(float16* addr) {
+ float16 f16;
+ f16.val = loadSafeWhenRacy((uint16_t*)addr);
+ return f16;
+}
+
} // namespace jit
} // namespace js
@@ -464,6 +473,13 @@ inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
storeSafeWhenRacy((uint8_t*)addr, (uint8_t)val);
}
+// Clang requires a specialization for float16.
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(float16* addr,
+ float16 val) {
+ storeSafeWhenRacy((uint16_t*)addr, val.val);
+}
+
} // namespace jit
} // namespace js
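
Why these float16 overloads exist at all: unlike the builtin scalar types, js::float16 is a struct wrapping a uint16_t payload, so the primitive-only template cannot cover it and the racy load/store must be forwarded by hand. A minimal analogue, assuming the `val` member used above:

#include <cstdint>

struct float16 {   // stand-in for js::float16 (vm/Float16.h)
  uint16_t val;    // raw IEEE 754 binary16 bits
};

static uint16_t loadRacyU16(uint16_t* addr) { return *addr; }
static void storeRacyU16(uint16_t* addr, uint16_t v) { *addr = v; }

// Forward the struct through the 16-bit primitive path, as the
// loadSafeWhenRacy/storeSafeWhenRacy specializations above do.
static float16 loadRacy(float16* addr) {
  float16 f16;
  f16.val = loadRacyU16(reinterpret_cast<uint16_t*>(addr));
  return f16;
}

static void storeRacy(float16* addr, float16 v) {
  storeRacyU16(reinterpret_cast<uint16_t*>(addr), v.val);
}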
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index 74c11bd91b..e7e4b0730a 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -817,19 +817,19 @@ class LConstructArrayGeneric
};
class LApplyArgsNative
- : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 1, 3> {
public:
LIR_HEADER(ApplyArgsNative)
- LApplyArgsNative(const LAllocation& func, const LAllocation& argc,
- const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
- const LDefinition& tmpCopy)
+ LApplyArgsNative(const LAllocation& argc, const LBoxAllocation& thisv,
+ const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
+ const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
- setOperand(0, func);
- setOperand(1, argc);
+ setOperand(0, argc);
setBoxOperand(ThisIndex, thisv);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
+ setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return false; }
@@ -838,94 +838,94 @@ class LApplyArgsNative
uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
- const LAllocation* getFunction() { return getOperand(0); }
- const LAllocation* getArgc() { return getOperand(1); }
+ const LAllocation* getArgc() { return getOperand(0); }
- static const size_t ThisIndex = 2;
+ static const size_t ThisIndex = 1;
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
+ const LDefinition* getTempExtra() { return getTemp(2); }
};
class LApplyArgsObjNative
- : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 1, 3> {
public:
LIR_HEADER(ApplyArgsObjNative)
- LApplyArgsObjNative(const LAllocation& func, const LAllocation& argsObj,
- const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
- const LDefinition& tmpCopy)
+ LApplyArgsObjNative(const LAllocation& argsObj, const LBoxAllocation& thisv,
+ const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
+ const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
- setOperand(0, func);
- setOperand(1, argsObj);
+ setOperand(0, argsObj);
setBoxOperand(ThisIndex, thisv);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
+ setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return false; }
MApplyArgsObj* mir() const { return mir_->toApplyArgsObj(); }
- const LAllocation* getFunction() { return getOperand(0); }
- const LAllocation* getArgsObj() { return getOperand(1); }
+ const LAllocation* getArgsObj() { return getOperand(0); }
- static const size_t ThisIndex = 2;
+ static const size_t ThisIndex = 1;
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
+ const LDefinition* getTempExtra() { return getTemp(2); }
// argc is mapped to the same register as argsObj: argc becomes live as
  // argsObj is dying; all registers are calltemps.
- const LAllocation* getArgc() { return getOperand(1); }
+ const LAllocation* getArgc() { return getOperand(0); }
};
class LApplyArrayNative
- : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 1, 3> {
public:
LIR_HEADER(ApplyArrayNative)
- LApplyArrayNative(const LAllocation& func, const LAllocation& elements,
- const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
- const LDefinition& tmpCopy)
+ LApplyArrayNative(const LAllocation& elements, const LBoxAllocation& thisv,
+ const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
+ const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
- setOperand(0, func);
- setOperand(1, elements);
+ setOperand(0, elements);
setBoxOperand(ThisIndex, thisv);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
+ setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return false; }
MApplyArray* mir() const { return mir_->toApplyArray(); }
- const LAllocation* getFunction() { return getOperand(0); }
- const LAllocation* getElements() { return getOperand(1); }
+ const LAllocation* getElements() { return getOperand(0); }
- static const size_t ThisIndex = 2;
+ static const size_t ThisIndex = 1;
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
+ const LDefinition* getTempExtra() { return getTemp(2); }
// argc is mapped to the same register as elements: argc becomes live as
  // elements is dying; all registers are calltemps.
- const LAllocation* getArgc() { return getOperand(1); }
+ const LAllocation* getArgc() { return getOperand(0); }
};
-class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
+class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 2, 3> {
public:
LIR_HEADER(ConstructArgsNative)
- LConstructArgsNative(const LAllocation& func, const LAllocation& argc,
- const LAllocation& newTarget,
- const LDefinition& tmpObjReg, const LDefinition& tmpCopy)
+ LConstructArgsNative(const LAllocation& argc, const LAllocation& newTarget,
+ const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
+ const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
- setOperand(0, func);
- setOperand(1, argc);
- setOperand(2, newTarget);
+ setOperand(0, argc);
+ setOperand(1, newTarget);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
+ setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return true; }
@@ -934,44 +934,44 @@ class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
- const LAllocation* getFunction() { return getOperand(0); }
- const LAllocation* getArgc() { return getOperand(1); }
- const LAllocation* getNewTarget() { return getOperand(2); }
+ const LAllocation* getArgc() { return getOperand(0); }
+ const LAllocation* getNewTarget() { return getOperand(1); }
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
+ const LDefinition* getTempExtra() { return getTemp(2); }
};
-class LConstructArrayNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
+class LConstructArrayNative : public LCallInstructionHelper<BOX_PIECES, 2, 3> {
public:
LIR_HEADER(ConstructArrayNative)
- LConstructArrayNative(const LAllocation& func, const LAllocation& elements,
+ LConstructArrayNative(const LAllocation& elements,
const LAllocation& newTarget,
const LDefinition& tmpObjReg,
- const LDefinition& tmpCopy)
+ const LDefinition& tmpCopy, const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
- setOperand(0, func);
- setOperand(1, elements);
- setOperand(2, newTarget);
+ setOperand(0, elements);
+ setOperand(1, newTarget);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
+ setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return true; }
MConstructArray* mir() const { return mir_->toConstructArray(); }
- const LAllocation* getFunction() { return getOperand(0); }
- const LAllocation* getElements() { return getOperand(1); }
- const LAllocation* getNewTarget() { return getOperand(2); }
+ const LAllocation* getElements() { return getOperand(0); }
+ const LAllocation* getNewTarget() { return getOperand(1); }
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
+ const LDefinition* getTempExtra() { return getTemp(2); }
// argc is mapped to the same register as elements: argc becomes live as
  // elements is dying; all registers are calltemps.
- const LAllocation* getArgc() { return getOperand(1); }
+ const LAllocation* getArgc() { return getOperand(0); }
};
// Takes in either an integer or boolean input and tests it for truthiness.
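
All five apply/construct-native classes above get the same reshuffle: the explicit callee operand disappears, the remaining operand indices shift down by one, and a third calltemp (tmpExtra) is added. The operand-aliasing comments are the subtle part; a toy model of the new LApplyArgsObjNative layout, with an illustrative base class rather than the real LIR machinery:

#include <cstddef>

struct LAllocation {};  // stand-in

template <size_t Operands, size_t Temps>
class MiniCallNode {
  LAllocation operands_[Operands];
  LAllocation temps_[Temps];

 public:
  const LAllocation* getOperand(size_t i) const { return &operands_[i]; }
  const LAllocation* getTemp(size_t i) const { return &temps_[i]; }
};

// One non-box operand (the boxed thisv is elided here) and three temps.
class MiniApplyArgsObjNative : public MiniCallNode<1, 3> {
 public:
  const LAllocation* getArgsObj() const { return getOperand(0); }
  // argc deliberately shares argsObj's slot: argc only becomes live once the
  // arguments object is dead, so one calltemp register serves both.
  const LAllocation* getArgc() const { return getOperand(0); }
  const LAllocation* getTempExtra() const { return getTemp(2); }
};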
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
index 86d4bca0e0..4a5a2c3b06 100644
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -631,6 +631,7 @@ void CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access,
break;
case Scalar::Int64:
case Scalar::Simd128:
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
case Scalar::Uint8Clamped:
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
index 9f9b1713c2..85673e6422 100644
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -322,6 +322,7 @@ void LIRGenerator::visitWasmStore(MWasmStore* ins) {
case Scalar::BigInt64:
case Scalar::BigUint64:
case Scalar::Uint8Clamped:
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
index ebc8c91eaa..c42fe844f6 100644
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -1082,6 +1082,7 @@ void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
}
case Scalar::Int64:
MOZ_CRASH("int64 loads must use load64");
+ case Scalar::Float16:
case Scalar::BigInt64:
case Scalar::BigUint64:
case Scalar::Uint8Clamped:
@@ -1135,6 +1136,7 @@ void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
FaultingCodeOffset(currentOffset()));
movq(srcAddr, out.reg);
break;
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
case Scalar::Simd128:
@@ -1199,6 +1201,7 @@ void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
case Scalar::Uint8Clamped:
case Scalar::BigInt64:
case Scalar::BigUint64:
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
index 692e884f06..cd32a62084 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -408,6 +408,8 @@ void CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(
case Scalar::BigInt64:
case Scalar::BigUint64:
case Scalar::Simd128:
+ // TODO: See Bug 1835034 for JIT support for Float16Array
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
case Scalar::Float32:
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
index 6d90f2f96b..e9786d4882 100644
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -401,6 +401,7 @@ void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
case Scalar::Uint8Clamped:
case Scalar::BigInt64:
case Scalar::BigUint64:
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
index e958e998c2..703ede44f5 100644
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -475,6 +475,7 @@ void LIRGenerator::visitWasmStore(MWasmStore* ins) {
case Scalar::Uint8Clamped:
case Scalar::BigInt64:
case Scalar::BigUint64:
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
index 232303b429..5c4f1e9901 100644
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -1113,6 +1113,7 @@ void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
case Scalar::Uint8Clamped:
case Scalar::BigInt64:
case Scalar::BigUint64:
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected type");
}
@@ -1197,6 +1198,7 @@ void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
break;
}
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
MOZ_CRASH("non-int64 loads should use load()");
@@ -1258,6 +1260,7 @@ void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
break;
case Scalar::Int64:
MOZ_CRASH("Should be handled in storeI64.");
+ case Scalar::Float16:
case Scalar::MaxTypedArrayViewType:
case Scalar::BigInt64:
case Scalar::BigUint64: