Diffstat (limited to 'js/src/jit')
-rw-r--r--  js/src/jit/ABIFunctionList-inl.h  2
-rw-r--r--  js/src/jit/BaselineBailouts.cpp  26
-rw-r--r--  js/src/jit/BaselineFrame.h  4
-rw-r--r--  js/src/jit/CacheIR.cpp  15
-rw-r--r--  js/src/jit/CacheIR.h  6
-rw-r--r--  js/src/jit/CacheIRCompiler.cpp  39
-rw-r--r--  js/src/jit/CacheIROps.yaml  3
-rw-r--r--  js/src/jit/CacheIRReader.h  24
-rw-r--r--  js/src/jit/CodeGenerator.cpp  552
-rw-r--r--  js/src/jit/CodeGenerator.h  38
-rw-r--r--  js/src/jit/Ion.cpp  16
-rw-r--r--  js/src/jit/JSJitFrameIter.cpp  93
-rw-r--r--  js/src/jit/JSJitFrameIter.h  49
-rw-r--r--  js/src/jit/JitFrames.cpp  68
-rw-r--r--  js/src/jit/JitFrames.h  11
-rw-r--r--  js/src/jit/JitOptions.cpp  3
-rw-r--r--  js/src/jit/JitRuntime.h  30
-rw-r--r--  js/src/jit/LIROps.yaml  18
-rw-r--r--  js/src/jit/Lowering.cpp  134
-rw-r--r--  js/src/jit/MIR.cpp  65
-rw-r--r--  js/src/jit/MIR.h  3
-rw-r--r--  js/src/jit/MIROps.yaml  12
-rw-r--r--  js/src/jit/MacroAssembler.cpp  20
-rw-r--r--  js/src/jit/MacroAssembler.h  1
-rw-r--r--  js/src/jit/PerfSpewer.cpp  6
-rw-r--r--  js/src/jit/ProcessExecutableMemory.cpp  12
-rw-r--r--  js/src/jit/Recover.cpp  370
-rw-r--r--  js/src/jit/Recover.h  9
-rw-r--r--  js/src/jit/Trampoline.cpp  36
-rw-r--r--  js/src/jit/TrampolineNatives.cpp  274
-rw-r--r--  js/src/jit/TrampolineNatives.h  60
-rw-r--r--  js/src/jit/VMFunctionList-inl.h  1
-rw-r--r--  js/src/jit/VMFunctions.cpp  35
-rw-r--r--  js/src/jit/VMFunctions.h  4
-rw-r--r--  js/src/jit/WarpCacheIRTranspiler.cpp  10
-rw-r--r--  js/src/jit/arm/Architecture-arm.h  2
-rw-r--r--  js/src/jit/arm64/Architecture-arm64.h  2
-rw-r--r--  js/src/jit/arm64/vixl/Cpu-vixl.cpp  2
-rw-r--r--  js/src/jit/loong64/Architecture-loong64.h  2
-rw-r--r--  js/src/jit/mips32/Architecture-mips32.h  2
-rw-r--r--  js/src/jit/mips64/Architecture-mips64.h  2
-rw-r--r--  js/src/jit/moz.build  1
-rw-r--r--  js/src/jit/none/Architecture-none.h  2
-rw-r--r--  js/src/jit/riscv64/Architecture-riscv64.h  2
-rw-r--r--  js/src/jit/shared/LIR-shared.h  182
-rw-r--r--  js/src/jit/wasm32/Architecture-wasm32.h  2
-rw-r--r--  js/src/jit/x86-shared/Architecture-x86-shared.h  4
47 files changed, 1711 insertions, 543 deletions
diff --git a/js/src/jit/ABIFunctionList-inl.h b/js/src/jit/ABIFunctionList-inl.h
index f8a52beeff..eb2123f7a2 100644
--- a/js/src/jit/ABIFunctionList-inl.h
+++ b/js/src/jit/ABIFunctionList-inl.h
@@ -103,6 +103,8 @@ namespace jit {
_(js::ArgumentsObject::finishForIonPure) \
_(js::ArgumentsObject::finishInlineForIonPure) \
_(js::ArrayShiftMoveElements) \
+ _(js::ArraySortData::sortWithComparator) \
+ _(js::ArraySortFromJit) \
_(js::ecmaAtan2) \
_(js::ecmaHypot) \
_(js::ecmaPow) \
diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp
index d916d000ed..150e16b618 100644
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -517,7 +517,12 @@ void BaselineStackBuilder::setNextCallee(
//
// Also use the callee's own ICScript if we purged callee ICScripts.
icScript_ = nextCallee->nonLazyScript()->jitScript()->icScript();
+
if (trialInliningState != TrialInliningState::MonomorphicInlined) {
+ // Don't use specialized ICScripts for any of the callees if we had an
+ // inlining failure. We're now using the generic ICScript but compilation
+ // might have used the trial-inlined ICScript and these can have very
+ // different inlining graphs.
canUseTrialInlinedICScripts_ = false;
}
}
@@ -1567,6 +1572,7 @@ bool jit::BailoutIonToBaseline(JSContext* cx, JitActivation* activation,
prevFrameType == FrameType::IonJS ||
prevFrameType == FrameType::BaselineStub ||
prevFrameType == FrameType::Rectifier ||
+ prevFrameType == FrameType::TrampolineNative ||
prevFrameType == FrameType::IonICCall ||
prevFrameType == FrameType::BaselineJS ||
prevFrameType == FrameType::BaselineInterpreterEntry);
@@ -1965,14 +1971,6 @@ bool jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfoArg) {
UnwindEnvironment(cx, ei, bailoutInfo->tryPC);
}
- // Check for interrupts now because we might miss an interrupt check in JIT
- // code when resuming in the prologue, after the stack/interrupt check.
- if (!cx->isExceptionPending()) {
- if (!CheckForInterrupt(cx)) {
- return false;
- }
- }
-
BailoutKind bailoutKind = *bailoutInfo->bailoutKind;
JitSpew(JitSpew_BaselineBailouts,
" Restored outerScript=(%s:%u:%u,%u) innerScript=(%s:%u:%u,%u) "
@@ -2169,7 +2167,17 @@ bool jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfoArg) {
ionScript->incNumFixableBailouts();
if (ionScript->shouldInvalidate()) {
#ifdef DEBUG
- if (saveFailedICHash && !JitOptions.disableBailoutLoopCheck) {
+ // To detect bailout loops, we save a hash of the CacheIR used to
+ // compile this script, and assert that we don't recompile with the
+ // exact same inputs. Some of our bailout detection strategies, like
+ // LICM and stub folding, rely on bailing out, updating some state
+ // when we hit the baseline fallback, and using that information when
+ // we invalidate. If the frequentBailoutThreshold is set too low, we
+ // will instead invalidate the first time we bail out, so we don't
+ // have the chance to make those decisions. That doesn't happen in
+ // regular code, so we just skip bailout loop detection in that case.
+ if (saveFailedICHash && !JitOptions.disableBailoutLoopCheck &&
+ JitOptions.frequentBailoutThreshold > 1) {
outerScript->jitScript()->setFailedICHash(ionScript->icHash());
}
#endif
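
[The comment added in the hunk above describes hash-based bailout-loop detection. A standalone sketch of the idea, with toy names rather than SpiderMonkey's actual JitScript/IonScript API: remember a hash of the CacheIR inputs when invalidating, and assert that the next compilation does not reuse exactly the same inputs.]

    #include <cassert>
    #include <cstdint>
    #include <optional>

    // Toy sketch of hash-based bailout-loop detection (illustrative names).
    struct BailoutLoopCheck {
      std::optional<uint64_t> failedICHash;

      void onInvalidate(uint64_t icHash) { failedICHash = icHash; }

      void onNewCompilation(uint64_t icHash) const {
        // Identical CacheIR means no new information was gathered since the
        // last invalidation -- a likely bailout loop.
        assert(!failedICHash || *failedICHash != icHash);
      }
    };
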
diff --git a/js/src/jit/BaselineFrame.h b/js/src/jit/BaselineFrame.h
index 6138332c81..f2a6811177 100644
--- a/js/src/jit/BaselineFrame.h
+++ b/js/src/jit/BaselineFrame.h
@@ -109,7 +109,9 @@ class BaselineFrame {
bool isConstructing() const {
return CalleeTokenIsConstructing(calleeToken());
}
- JSScript* script() const { return ScriptFromCalleeToken(calleeToken()); }
+ JSScript* script() const {
+ return MaybeForwardedScriptFromCalleeToken(calleeToken());
+ }
JSFunction* callee() const { return CalleeTokenToFunction(calleeToken()); }
Value calleev() const { return ObjectValue(*callee()); }
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
index 68dbd6bfee..03eae14140 100644
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -1199,7 +1199,8 @@ static ObjOperandId GuardAndLoadWindowProxyWindow(CacheIRWriter& writer,
ObjOperandId objId,
GlobalObject* windowObj) {
writer.guardClass(objId, GuardClassKind::WindowProxy);
- ObjOperandId windowObjId = writer.loadWrapperTarget(objId);
+ ObjOperandId windowObjId = writer.loadWrapperTarget(objId,
+ /*fallible = */ false);
writer.guardSpecificObject(windowObjId, windowObj);
return windowObjId;
}
@@ -1357,7 +1358,8 @@ AttachDecision GetPropIRGenerator::tryAttachCrossCompartmentWrapper(
writer.guardHasProxyHandler(objId, Wrapper::wrapperHandler(obj));
// Load the object wrapped by the CCW
- ObjOperandId wrapperTargetId = writer.loadWrapperTarget(objId);
+ ObjOperandId wrapperTargetId =
+ writer.loadWrapperTarget(objId, /*fallible = */ false);
// If the compartment of the wrapped object is different we should fail.
writer.guardCompartment(wrapperTargetId, wrappedTargetGlobal,
@@ -1468,7 +1470,8 @@ AttachDecision GetPropIRGenerator::tryAttachXrayCrossCompartmentWrapper(
writer.guardHasProxyHandler(objId, GetProxyHandler(obj));
// Load the object wrapped by the CCW
- ObjOperandId wrapperTargetId = writer.loadWrapperTarget(objId);
+ ObjOperandId wrapperTargetId =
+ writer.loadWrapperTarget(objId, /*fallible = */ false);
// Test the wrapped object's class. The properties held by xrays or their
// prototypes will be invariant for objects of a given class, except for
@@ -1578,9 +1581,9 @@ AttachDecision GetPropIRGenerator::tryAttachScriptedProxy(
writer.guardIsProxy(objId);
writer.guardHasProxyHandler(objId, &ScriptedProxyHandler::singleton);
- ValOperandId handlerValId = writer.loadScriptedProxyHandler(objId);
- ObjOperandId handlerObjId = writer.guardToObject(handlerValId);
- ObjOperandId targetObjId = writer.loadWrapperTarget(objId);
+ ObjOperandId handlerObjId = writer.loadScriptedProxyHandler(objId);
+ ObjOperandId targetObjId =
+ writer.loadWrapperTarget(objId, /*fallible =*/true);
writer.guardIsNativeObject(targetObjId);
diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
index 9bedbb7ddc..132070d535 100644
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -321,6 +321,12 @@ class CallFlags {
CallFlags() = default;
explicit CallFlags(ArgFormat format) : argFormat_(format) {}
+ CallFlags(ArgFormat format, bool isConstructing, bool isSameRealm,
+ bool needsUninitializedThis)
+ : argFormat_(format),
+ isConstructing_(isConstructing),
+ isSameRealm_(isSameRealm),
+ needsUninitializedThis_(needsUninitializedThis) {}
CallFlags(bool isConstructing, bool isSpread, bool isSameRealm = false,
bool needsUninitializedThis = false)
: argFormat_(isSpread ? Spread : Standard),
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
index 1467cebe08..9a26b0816c 100644
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -2379,19 +2379,23 @@ bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
return true;
}
-bool CacheIRCompiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
+bool CacheIRCompiler::emitLoadScriptedProxyHandler(ObjOperandId resultId,
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
- ValueOperand output = allocator.defineValueRegister(masm, resultId);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
+ Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
+ ScriptedProxyHandler::HANDLER_EXTRA));
+ masm.fallibleUnboxObject(handlerAddr, output, failure->label());
- masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
- output.scratchReg());
- masm.loadValue(
- Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
- ScriptedProxyHandler::HANDLER_EXTRA)),
- output);
return true;
}
@@ -2937,14 +2941,27 @@ bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
}
bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
- ObjOperandId resultId) {
+ ObjOperandId resultId,
+ bool fallible) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register reg = allocator.defineRegister(masm, resultId);
+ FailurePath* failure;
+ if (fallible && !addFailurePath(&failure)) {
+ return false;
+ }
+
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
- masm.unboxObject(
- Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
+
+ Address targetAddr(reg,
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
+ if (fallible) {
+ masm.fallibleUnboxObject(targetAddr, reg, failure->label());
+ } else {
+ masm.unboxObject(targetAddr, reg);
+ }
+
return true;
}
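
[The |fallible| flag exists because a revoked proxy holds a non-object in its private slot (see "Bail for revoked proxies" in the CodeGenerator.cpp hunk below). A minimal standalone sketch of the two unbox flavours' semantics, with a toy Value type rather than the real MacroAssembler code:]

    #include <cassert>

    // Toy value representation, for illustration only.
    struct Value {
      bool isObject;
      void* obj;
    };

    // Infallible form: the caller guarantees the slot holds an object.
    void* unboxObject(const Value& v) {
      assert(v.isObject);
      return v.obj;
    }

    // Fallible form: reports failure (jitted code branches to the IC's
    // failure path) when the slot holds something else, e.g. the null
    // private slot of a revoked proxy.
    bool fallibleUnboxObject(const Value& v, void** out) {
      if (!v.isObject) {
        return false;
      }
      *out = v.obj;
      return true;
    }
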
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
index 974404d5c0..2f3097dfd8 100644
--- a/js/src/jit/CacheIROps.yaml
+++ b/js/src/jit/CacheIROps.yaml
@@ -708,7 +708,7 @@
transpile: true
cost_estimate: 1
args:
- result: ValId
+ result: ObjId
obj: ObjId
- name: IdToStringOrSymbol
@@ -837,6 +837,7 @@
args:
obj: ObjId
result: ObjId
+ fallible: BoolImm
- name: LoadValueTag
shared: true
diff --git a/js/src/jit/CacheIRReader.h b/js/src/jit/CacheIRReader.h
index 54b298c999..59483424a3 100644
--- a/js/src/jit/CacheIRReader.h
+++ b/js/src/jit/CacheIRReader.h
@@ -129,21 +129,15 @@ class MOZ_RAII CacheIRReader {
bool isSameRealm = encoded & CallFlags::IsSameRealm;
bool needsUninitializedThis = encoded & CallFlags::NeedsUninitializedThis;
MOZ_ASSERT_IF(needsUninitializedThis, isConstructing);
- switch (format) {
- case CallFlags::Unknown:
- MOZ_CRASH("Unexpected call flags");
- case CallFlags::Standard:
- return CallFlags(isConstructing, /*isSpread =*/false, isSameRealm,
- needsUninitializedThis);
- case CallFlags::Spread:
- return CallFlags(isConstructing, /*isSpread =*/true, isSameRealm,
- needsUninitializedThis);
- default:
- // The existing non-standard argument formats (FunCall and FunApply)
- // can't be constructors.
- MOZ_ASSERT(!isConstructing);
- return CallFlags(format);
- }
+
+ // FunCall and FunApply can't be constructors.
+ MOZ_ASSERT_IF(format == CallFlags::FunCall, !isConstructing);
+ MOZ_ASSERT_IF(format == CallFlags::FunApplyArgsObj, !isConstructing);
+ MOZ_ASSERT_IF(format == CallFlags::FunApplyArray, !isConstructing);
+ MOZ_ASSERT_IF(format == CallFlags::FunApplyNullUndefined, !isConstructing);
+
+ return CallFlags(format, isConstructing, isSameRealm,
+ needsUninitializedThis);
}
uint8_t readByte() { return buffer_.readByte(); }
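
[A sketch of the flag-byte decoding this hunk simplifies. The mask values and bit positions below are assumptions for illustration; only the format and flag names come from the patch.]

    #include <cstdint>

    enum ArgFormat : uint8_t {
      Unknown = 0,
      Standard,
      Spread,
      FunCall,
      FunApplyArgsObj,
      FunApplyArray,
      FunApplyNullUndefined,
    };

    constexpr uint8_t ArgFormatMask = 0x0F;             // assumed layout
    constexpr uint8_t IsConstructing = 1 << 4;          // assumed bit
    constexpr uint8_t IsSameRealm = 1 << 5;             // assumed bit
    constexpr uint8_t NeedsUninitializedThis = 1 << 6;  // assumed bit

    struct Decoded {
      ArgFormat format;
      bool isConstructing, isSameRealm, needsUninitializedThis;
    };

    Decoded readCallFlags(uint8_t encoded) {
      // With the new four-argument CallFlags constructor, every format takes
      // the same path; the per-format switch is no longer needed.
      return {ArgFormat(encoded & ArgFormatMask),
              bool(encoded & IsConstructing),
              bool(encoded & IsSameRealm),
              bool(encoded & NeedsUninitializedThis)};
    }
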
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 10a69f0cb3..559ac50cc7 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -4205,13 +4205,23 @@ void CodeGenerator::visitGuardShape(LGuardShape* guard) {
}
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
+ auto fuseIndex = guard->mir()->fuseIndex();
+ switch (fuseIndex) {
+ case RealmFuses::FuseIndex::OptimizeGetIteratorFuse:
+ addOptimizeGetIteratorFuseDependency();
+ return;
+ default:
+ // validateAndRegisterFuseDependencies doesn't have handling for this
+ // yet, so actively check the fuse instead.
+ break;
+ }
+
Register temp = ToRegister(guard->temp0());
Label bail;
// Bake specific fuse address for Ion code, because we won't share this code
// across realms.
- GuardFuse* fuse =
- mirGen().realm->realmFuses().getFuseByIndex(guard->mir()->fuseIndex());
+ GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);
@@ -6269,7 +6279,8 @@ void CodeGenerator::visitCallKnown(LCallKnown* call) {
UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
WrappedFunction* target = call->getSingleTarget();
- // Native single targets (except wasm) are handled by LCallNative.
+ // Native single targets (except Wasm and TrampolineNative functions) are
+ // handled by LCallNative.
MOZ_ASSERT(target->hasJitEntry());
// Missing arguments must have been explicitly appended by WarpBuilder.
@@ -6341,12 +6352,7 @@ void CodeGenerator::visitCallKnown(LCallKnown* call) {
template <typename T>
void CodeGenerator::emitCallInvokeFunction(T* apply) {
- Register objreg = ToRegister(apply->getTempObject());
-
- // Push the space used by the arguments.
- masm.moveStackPtrTo(objreg);
-
- pushArg(objreg); // argv.
+ pushArg(masm.getStackPointer()); // argv.
pushArg(ToRegister(apply->getArgc())); // argc.
pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
@@ -6370,7 +6376,13 @@ void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
"Stack padding assumes that the frameSize is correct");
MOZ_ASSERT(JitStackValueAlignment == 2);
Label noPaddingNeeded;
- // if the number of arguments is odd, then we do not need any padding.
+ // If the number of arguments is odd, then we do not need any padding.
+ //
+ // Note: The |JitStackValueAlignment == 2| condition requires that the
+ // overall number of values on the stack is even. When we have an odd number
+ // of arguments, we don't need any padding, because the |thisValue| is
+ // pushed after the arguments, so the overall number of values on the stack
+ // is even.
masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
masm.addPtr(Imm32(1), scratch);
masm.bind(&noPaddingNeeded);
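
[The parity reasoning in the comment above can be checked with plain arithmetic; a standalone sketch, not SpiderMonkey code:]

    #include <cassert>
    #include <cstdint>

    // Apply-like calls push argc arguments plus |thisValue|, and the total
    // number of Values must be even (JitStackValueAlignment == 2).
    uint64_t valueSlotsForApply(uint64_t argc) {
      uint64_t values = argc + 1;                  // arguments + thisValue
      uint64_t padding = (argc % 2 == 1) ? 0 : 1;  // odd argc: already even
      assert((values + padding) % 2 == 0);
      return values + padding;
    }
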
@@ -6382,13 +6394,13 @@ void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
masm.subFromStackPtr(scratch);
#ifdef DEBUG
- // Put a magic value in the space reserved for padding. Note, this code
- // cannot be merged with the previous test, as not all architectures can
- // write below their stack pointers.
+ // Put a magic value in the space reserved for padding. Note, this code cannot
+ // be merged with the previous test, as not all architectures can write below
+ // their stack pointers.
if (JitStackValueAlignment > 1) {
MOZ_ASSERT(JitStackValueAlignment == 2);
Label noPaddingNeeded;
- // if the number of arguments is odd, then we do not need any padding.
+ // If the number of arguments is odd, then we do not need any padding.
masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
@@ -6403,8 +6415,8 @@ void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
Register argcreg, Register newTargetAndScratch) {
// Align the JitFrameLayout on the JitStackAlignment. Contrary to
// |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
- // we can't write to |newTargetAndScratch| before |new.target| has
- // been pushed onto the stack.
+ // we can't write to |newTargetAndScratch| before |new.target| has been pushed
+ // onto the stack.
if (JitStackValueAlignment > 1) {
MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
"Stack padding assumes that the frameSize is correct");
@@ -6412,6 +6424,12 @@ void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
Label noPaddingNeeded;
// If the number of arguments is even, then we do not need any padding.
+ //
+ // Note: The |JitStackValueAlignment == 2| condition requires that the
+ // overall number of values on the stack is even. When we have an even
+ // number of arguments, we don't need any padding, because |new.target| is
+ // pushed before the arguments and |thisValue| is pushed after all
+ // arguments, so the overall number of values on the stack is even.
masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
masm.pushValue(MagicValue(JS_ARG_POISON));
masm.bind(&noPaddingNeeded);
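
[The construct case adds |new.target|, so the parity flips; the same sketch adapted:]

    // Construct calls push |new.target|, argc arguments, and |thisValue|,
    // i.e. argc + 2 Values in total.
    uint64_t valueSlotsForConstruct(uint64_t argc) {
      uint64_t values = argc + 2;                  // new.target + args + this
      uint64_t padding = (argc % 2 == 0) ? 0 : 1;  // even argc: already even
      assert((values + padding) % 2 == 0);
      return values + padding;
    }
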
@@ -6437,9 +6455,8 @@ void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
Label loop;
masm.bind(&loop);
- // As argvIndex is off by 1, and we use the decBranchPtr instruction
- // to loop back, we have to substract the size of the word which are
- // copied.
+ // As argvIndex is off by 1, and we use the decBranchPtr instruction to loop
+ // back, we have to subtract the size of the words which are copied.
BaseValueIndex srcPtr(argvSrcBase, argvIndex,
int32_t(argvSrcOffset) - sizeof(void*));
BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
@@ -6488,6 +6505,9 @@ void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
// clang-format on
// Compute the source and destination offsets into the stack.
+ //
+ // The |extraFormals| parameter is used when copying rest-parameters and
+ // allows us to skip the initial parameters before the actual rest-parameters.
Register argvSrcBase = FramePointer;
size_t argvSrcOffset =
JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
@@ -6500,17 +6520,18 @@ void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
argvDstOffset);
- // Join with all arguments copied and the extra stack usage computed.
+ // Join with all arguments copied.
masm.bind(&end);
}
-void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
- Register scratch) {
- // Holds the function nargs. Initially the number of args to the caller.
+void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply) {
+ // Holds the function nargs.
Register argcreg = ToRegister(apply->getArgc());
Register copyreg = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
uint32_t extraFormals = apply->numExtraFormals();
+ // Allocate space on the stack for arguments.
emitAllocateSpaceForApply(argcreg, scratch);
emitPushArguments(argcreg, scratch, copyreg, extraFormals);
@@ -6519,22 +6540,21 @@ void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
}
-void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
- // argc and argsObj are mapped to the same calltemp register.
- MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
-
- Register tmpArgc = ToRegister(apply->getTempObject());
+void CodeGenerator::emitPushArguments(LApplyArgsObj* apply) {
Register argsObj = ToRegister(apply->getArgsObj());
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+
+ // argc and argsObj are mapped to the same calltemp register.
+ MOZ_ASSERT(argsObj == ToRegister(apply->getArgc()));
// Load argc into tmpArgc.
- Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
- masm.unboxInt32(lengthAddr, tmpArgc);
- masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
+ masm.loadArgumentsObjectLength(argsObj, tmpArgc);
- // Allocate space on the stack for arguments. This modifies scratch.
+ // Allocate space on the stack for arguments.
emitAllocateSpaceForApply(tmpArgc, scratch);
- // Load arguments data
+ // Load arguments data.
masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
argsObj);
size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
@@ -6543,6 +6563,7 @@ void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
// After this call, the argsObj register holds the argument count instead.
emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
+ // Push |this|.
masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
}
@@ -6566,69 +6587,72 @@ void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
// Skip the copy of arguments if there are none.
masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
+ {
+ // Copy the values. This code is skipped entirely if there are no values.
+ size_t argvDstOffset = 0;
- // Copy the values. This code is skipped entirely if there are
- // no values.
- size_t argvDstOffset = 0;
-
- Register argvSrcBase = srcBaseAndArgc;
- Register copyreg = scratch;
-
- masm.push(tmpArgc);
- Register argvIndex = tmpArgc;
- argvDstOffset += sizeof(void*);
+ Register argvSrcBase = srcBaseAndArgc;
- // Copy
- emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
- argvDstOffset);
+ // Stash away |tmpArgc| and adjust argvDstOffset accordingly.
+ masm.push(tmpArgc);
+ Register argvIndex = tmpArgc;
+ argvDstOffset += sizeof(void*);
- // Restore.
- masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
- masm.jump(&epilogue);
+ // Copy
+ emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
+ argvDstOffset);
- // Clear argc if we skipped the copy step.
+ // Restore.
+ masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
+ masm.jump(&epilogue);
+ }
masm.bind(&noCopy);
- masm.movePtr(ImmWord(0), srcBaseAndArgc);
+ {
+ // Clear argc if we skipped the copy step.
+ masm.movePtr(ImmWord(0), srcBaseAndArgc);
+ }
- // Join with all arguments copied and the extra stack usage computed.
+ // Join with all arguments copied.
// Note, "srcBase" has become "argc".
masm.bind(&epilogue);
}
-void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
- Register scratch) {
+void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply) {
+ Register elements = ToRegister(apply->getElements());
Register tmpArgc = ToRegister(apply->getTempObject());
- Register elementsAndArgc = ToRegister(apply->getElements());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+
+ // argc and elements are mapped to the same calltemp register.
+ MOZ_ASSERT(elements == ToRegister(apply->getArgc()));
// Invariants guarded in the caller:
// - the array is not too long
// - the array length equals its initialized length
// The array length is our argc for the purposes of allocating space.
- Address length(ToRegister(apply->getElements()),
- ObjectElements::offsetOfLength());
- masm.load32(length, tmpArgc);
+ masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
// Allocate space for the values.
emitAllocateSpaceForApply(tmpArgc, scratch);
// After this call "elements" has become "argc".
size_t elementsOffset = 0;
- emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
+ emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);
// Push |this|.
masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
}
-void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
- Register scratch) {
- MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
-
- // Holds the function nargs. Initially the number of args to the caller.
+void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct) {
+ // Holds the function nargs.
Register argcreg = ToRegister(construct->getArgc());
Register copyreg = ToRegister(construct->getTempObject());
+ Register scratch = ToRegister(construct->getTempForArgCopy());
uint32_t extraFormals = construct->numExtraFormals();
+ // newTarget and scratch are mapped to the same calltemp register.
+ MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
+
// Allocate space for the values.
// After this call "newTarget" has become "scratch".
emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
@@ -6639,29 +6663,31 @@ void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
}
-void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
- Register scratch) {
- MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
-
+void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct) {
+ Register elements = ToRegister(construct->getElements());
Register tmpArgc = ToRegister(construct->getTempObject());
- Register elementsAndArgc = ToRegister(construct->getElements());
+ Register scratch = ToRegister(construct->getTempForArgCopy());
+
+ // argc and elements are mapped to the same calltemp register.
+ MOZ_ASSERT(elements == ToRegister(construct->getArgc()));
+
+ // newTarget and scratch are mapped to the same calltemp register.
+ MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
// Invariants guarded in the caller:
// - the array is not too long
// - the array length equals its initialized length
// The array length is our argc for the purposes of allocating space.
- Address length(ToRegister(construct->getElements()),
- ObjectElements::offsetOfLength());
- masm.load32(length, tmpArgc);
+ masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
// Allocate space for the values.
+ // After this call "newTarget" has become "scratch".
emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
- // After this call "elements" has become "argc" and "newTarget" has become
- // "scratch".
+ // After this call "elements" has become "argc".
size_t elementsOffset = 0;
- emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
+ emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);
// Push |this|.
masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
@@ -6682,43 +6708,24 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
// Copy the arguments of the current function.
//
- // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
- // compute argc. The argc register and the elements/argsObj register
- // are the same; argc must not be referenced before the call to
- // emitPushArguments() and elements/argsObj must not be referenced
- // after it returns.
+ // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also compute
+ // argc. The argc register and the elements/argsObj register are the same;
+ // argc must not be referenced before the call to emitPushArguments() and
+ // elements/argsObj must not be referenced after it returns.
//
- // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
- // with scratch; newTarget must not be referenced after this point.
+ // In the case of ConstructArray or ConstructArgs, also overwrite newTarget;
+ // newTarget must not be referenced after this point.
//
// objreg is dead across this call.
- emitPushArguments(apply, scratch);
+ emitPushArguments(apply);
masm.checkStackAlignment();
bool constructing = apply->mir()->isConstructing();
- // If the function is native, only emit the call to InvokeFunction.
- if (apply->hasSingleTarget() &&
- apply->getSingleTarget()->isNativeWithoutJitEntry()) {
- emitCallInvokeFunction(apply);
-
-#ifdef DEBUG
- // Native constructors are guaranteed to return an Object value, so we never
- // have to replace a primitive result with the previously allocated Object
- // from CreateThis.
- if (constructing) {
- Label notPrimitive;
- masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
- &notPrimitive);
- masm.assumeUnreachable("native constructors don't return primitives");
- masm.bind(&notPrimitive);
- }
-#endif
-
- emitRestoreStackPointerFromFP();
- return;
- }
+ // If the function is native, the call is compiled through emitApplyNative.
+ MOZ_ASSERT_IF(apply->hasSingleTarget(),
+ !apply->getSingleTarget()->isNativeWithoutJitEntry());
Label end, invoke;
@@ -6812,8 +6819,8 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
masm.bind(&end);
- // If the return value of the constructing function is Primitive,
- // replace the return value with the Object from CreateThis.
+ // If the return value of the constructing function is Primitive, replace the
+ // return value with the Object from CreateThis.
if (constructing) {
Label notPrimitive;
masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
@@ -6833,17 +6840,200 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
emitRestoreStackPointerFromFP();
}
-void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
+template <typename T>
+void CodeGenerator::emitCallInvokeNativeFunction(T* apply) {
+ pushArg(masm.getStackPointer()); // argv.
+ pushArg(ToRegister(apply->getArgc())); // argc.
+ pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
+ pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
+
+ using Fn =
+ bool (*)(JSContext*, bool, bool, uint32_t, Value*, MutableHandleValue);
+ callVM<Fn, jit::InvokeNativeFunction>(apply);
+}
+
+template <typename T>
+void CodeGenerator::emitPushNativeArguments(T* apply) {
+ Register argc = ToRegister(apply->getArgc());
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+ uint32_t extraFormals = apply->numExtraFormals();
+
+ // Push arguments.
+ Label noCopy;
+ masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
+ {
+ // Use scratch register to calculate stack space (no padding needed).
+ masm.movePtr(argc, scratch);
+
+ // Reserve space for copying the arguments.
+ NativeObject::elementsSizeMustNotOverflow();
+ masm.lshiftPtr(Imm32(ValueShift), scratch);
+ masm.subFromStackPtr(scratch);
+
+ // Compute the source and destination offsets into the stack.
+ Register argvSrcBase = FramePointer;
+ size_t argvSrcOffset =
+ JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
+ size_t argvDstOffset = 0;
+
+ Register argvIndex = tmpArgc;
+ masm.move32(argc, argvIndex);
+
+ // Copy arguments.
+ emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
+ argvDstOffset);
+ }
+ masm.bind(&noCopy);
+}
+
+template <typename T>
+void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
+ Register argc = ToRegister(apply->getArgc());
+ Register elements = ToRegister(apply->getElements());
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+
+ // NB: argc and elements are mapped to the same register.
+ MOZ_ASSERT(argc == elements);
+
+ // Invariants guarded in the caller:
+ // - the array is not too long
+ // - the array length equals its initialized length
+
+ // The array length is our argc.
+ masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
+
+ // Skip the copy of arguments if there are none.
+ Label noCopy;
+ masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
+ {
+ // |tmpArgc| is off-by-one, so adjust the offset accordingly.
+ BaseObjectElementIndex srcPtr(elements, tmpArgc,
+ -int32_t(sizeof(JS::Value)));
+
+ Label loop;
+ masm.bind(&loop);
+ masm.pushValue(srcPtr, scratch);
+ masm.decBranchPtr(Assembler::NonZero, tmpArgc, Imm32(1), &loop);
+ }
+ masm.bind(&noCopy);
+
+ // Set argc in preparation for emitCallInvokeNativeFunction.
+ masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);
+}
+
+void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
+ emitPushNativeArguments(apply);
+}
+
+void CodeGenerator::emitPushArguments(LApplyArrayNative* apply) {
+ emitPushArrayAsNativeArguments(apply);
+}
+
+void CodeGenerator::emitPushArguments(LConstructArgsNative* construct) {
+ emitPushNativeArguments(construct);
+}
+
+void CodeGenerator::emitPushArguments(LConstructArrayNative* construct) {
+ emitPushArrayAsNativeArguments(construct);
+}
+
+void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
+ Register argc = ToRegister(apply->getArgc());
+ Register argsObj = ToRegister(apply->getArgsObj());
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+
+ // NB: argc and argsObj are mapped to the same register.
+ MOZ_ASSERT(argc == argsObj);
+
+ // Load argc into tmpArgc.
+ masm.loadArgumentsObjectLength(argsObj, tmpArgc);
+
+ // Push arguments.
+ Label noCopy, epilogue;
+ masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
+ {
+ // Use scratch register to calculate stack space (no padding needed).
+ masm.movePtr(tmpArgc, scratch);
+
+ // Reserve space for copying the arguments.
+ NativeObject::elementsSizeMustNotOverflow();
+ masm.lshiftPtr(Imm32(ValueShift), scratch);
+ masm.subFromStackPtr(scratch);
+
+ // Load arguments data.
+ Register argvSrcBase = argsObj;
+ masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
+ argvSrcBase);
+ size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
+ size_t argvDstOffset = 0;
+
+ // Stash away |tmpArgc| and adjust argvDstOffset accordingly.
+ masm.push(tmpArgc);
+ argvDstOffset += sizeof(void*);
+
+ // Copy the values.
+ emitCopyValuesForApply(argvSrcBase, tmpArgc, scratch, argvSrcOffset,
+ argvDstOffset);
+
+ // Set argc in preparation for emitCallInvokeNativeFunction.
+ masm.pop(argc);
+ masm.jump(&epilogue);
+ }
+ masm.bind(&noCopy);
+ {
+ // Set argc in preparation for emitCallInvokeNativeFunction.
+ masm.movePtr(ImmWord(0), argc);
+ }
+ masm.bind(&epilogue);
+}
+
+template <typename T>
+void CodeGenerator::emitApplyNative(T* apply) {
+ MOZ_ASSERT(apply->mir()->getSingleTarget()->isNativeWithoutJitEntry());
+
+ constexpr bool isConstructing = T::isConstructing();
+ MOZ_ASSERT(isConstructing == apply->mir()->isConstructing(),
+ "isConstructing condition must be consistent");
+
+ // Push newTarget.
+ if constexpr (isConstructing) {
+ masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
+ }
+
+ // Push arguments.
+ emitPushArguments(apply);
+
+ // Push |this|.
+ if constexpr (isConstructing) {
+ masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
+ } else {
+ masm.pushValue(ToValue(apply, T::ThisIndex));
+ }
+
+ // Push callee.
+ masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getFunction()));
+
+ // Call the native function.
+ emitCallInvokeNativeFunction(apply);
+
+ // Pop arguments and continue.
+ emitRestoreStackPointerFromFP();
+}
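
[For orientation, the push order in emitApplyNative produces this Value layout (sketch; the stack grows downward, so |callee| ends up at the lowest address, and the stack pointer at the time of the VM call doubles as the argv passed to InvokeNativeFunction):]

    [new.target]                             (constructing only)
    [arg0 .. argN-1]                         (pushed by emitPushArguments)
    [thisValue or JS_IS_CONSTRUCTING magic]
    [callee]                                 <- stack pointer / argv
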
+
+template <typename T>
+void CodeGenerator::emitApplyArgsGuard(T* apply) {
LSnapshot* snapshot = apply->snapshot();
Register argcreg = ToRegister(apply->getArgc());
// Ensure that we have a reasonable number of arguments.
bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
-
- emitApplyGeneric(apply);
}
-void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
+template <typename T>
+void CodeGenerator::emitApplyArgsObjGuard(T* apply) {
Register argsObj = ToRegister(apply->getArgsObj());
Register temp = ToRegister(apply->getTempObject());
@@ -6851,16 +7041,15 @@ void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
masm.loadArgumentsObjectLength(argsObj, temp, &bail);
masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
bailoutFrom(&bail, apply->snapshot());
-
- emitApplyGeneric(apply);
}
-void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
+template <typename T>
+void CodeGenerator::emitApplyArrayGuard(T* apply) {
LSnapshot* snapshot = apply->snapshot();
+ Register elements = ToRegister(apply->getElements());
Register tmp = ToRegister(apply->getTempObject());
- Address length(ToRegister(apply->getElements()),
- ObjectElements::offsetOfLength());
+ Address length(elements, ObjectElements::offsetOfLength());
masm.load32(length, tmp);
// Ensure that we have a reasonable number of arguments.
@@ -6868,43 +7057,60 @@ void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
// Ensure that the array does not contain an uninitialized tail.
- Address initializedLength(ToRegister(apply->getElements()),
+ Address initializedLength(elements,
ObjectElements::offsetOfInitializedLength());
masm.sub32(initializedLength, tmp);
bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
+}
+void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
+ emitApplyArgsGuard(apply);
emitApplyGeneric(apply);
}
-void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
- LSnapshot* snapshot = lir->snapshot();
- Register argcreg = ToRegister(lir->getArgc());
+void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
+ emitApplyArgsObjGuard(apply);
+ emitApplyGeneric(apply);
+}
- // Ensure that we have a reasonable number of arguments.
- bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
+void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
+ emitApplyArrayGuard(apply);
+ emitApplyGeneric(apply);
+}
+void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
+ emitApplyArgsGuard(lir);
emitApplyGeneric(lir);
}
void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
- LSnapshot* snapshot = lir->snapshot();
- Register tmp = ToRegister(lir->getTempObject());
+ emitApplyArrayGuard(lir);
+ emitApplyGeneric(lir);
+}
- Address length(ToRegister(lir->getElements()),
- ObjectElements::offsetOfLength());
- masm.load32(length, tmp);
+void CodeGenerator::visitApplyArgsNative(LApplyArgsNative* lir) {
+ emitApplyArgsGuard(lir);
+ emitApplyNative(lir);
+}
- // Ensure that we have a reasonable number of arguments.
- bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
+void CodeGenerator::visitApplyArgsObjNative(LApplyArgsObjNative* lir) {
+ emitApplyArgsObjGuard(lir);
+ emitApplyNative(lir);
+}
- // Ensure that the array does not contain an uninitialized tail.
+void CodeGenerator::visitApplyArrayNative(LApplyArrayNative* lir) {
+ emitApplyArrayGuard(lir);
+ emitApplyNative(lir);
+}
- Address initializedLength(ToRegister(lir->getElements()),
- ObjectElements::offsetOfInitializedLength());
- masm.sub32(initializedLength, tmp);
- bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
+void CodeGenerator::visitConstructArgsNative(LConstructArgsNative* lir) {
+ emitApplyArgsGuard(lir);
+ emitApplyNative(lir);
+}
- emitApplyGeneric(lir);
+void CodeGenerator::visitConstructArrayNative(LConstructArrayNative* lir) {
+ emitApplyArrayGuard(lir);
+ emitApplyNative(lir);
}
void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
@@ -15460,15 +15666,37 @@ void CodeGenerator::validateAndRegisterFuseDependencies(JSContext* cx,
if (!hasSeenObjectEmulateUndefinedFuse.intact()) {
JitSpew(JitSpew_Codegen,
- "tossing compilation; fuse dependency no longer valid\n");
+ "tossing compilation; hasSeenObjectEmulateUndefinedFuse fuse "
+ "dependency no longer valid\n");
*isValid = false;
return;
}
if (!hasSeenObjectEmulateUndefinedFuse.addFuseDependency(cx, script)) {
- JitSpew(
- JitSpew_Codegen,
- "tossing compilation; failed to register script dependency\n");
+ JitSpew(JitSpew_Codegen,
+ "tossing compilation; failed to register "
+ "hasSeenObjectEmulateUndefinedFuse script dependency\n");
+ *isValid = false;
+ return;
+ }
+ break;
+ }
+
+ case FuseDependencyKind::OptimizeGetIteratorFuse: {
+ auto& optimizeGetIteratorFuse =
+ cx->realm()->realmFuses.optimizeGetIteratorFuse;
+ if (!optimizeGetIteratorFuse.intact()) {
+ JitSpew(JitSpew_Codegen,
+ "tossing compilation; optimizeGetIteratorFuse fuse "
+ "dependency no longer valid\n");
+ *isValid = false;
+ return;
+ }
+
+ if (!optimizeGetIteratorFuse.addFuseDependency(cx, script)) {
+ JitSpew(JitSpew_Codegen,
+ "tossing compilation; failed to register "
+ "optimizeGetIteratorFuse script dependency\n");
*isValid = false;
return;
}
@@ -15837,15 +16065,16 @@ void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
void CodeGenerator::visitLoadScriptedProxyHandler(
LLoadScriptedProxyHandler* ins) {
- const Register obj = ToRegister(ins->getOperand(0));
- ValueOperand output = ToOutValue(ins);
+ Register obj = ToRegister(ins->getOperand(0));
+ Register output = ToRegister(ins->output());
- masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
- output.scratchReg());
- masm.loadValue(
- Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
- ScriptedProxyHandler::HANDLER_EXTRA)),
- output);
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
+
+ Label bail;
+ Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
+ ScriptedProxyHandler::HANDLER_EXTRA));
+ masm.fallibleUnboxObject(handlerAddr, output, &bail);
+ bailoutFrom(&bail, ins->snapshot());
}
#ifdef JS_PUNBOX64
@@ -19861,9 +20090,17 @@ void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
Register output = ToRegister(lir->output());
masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
- masm.unboxObject(
- Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
- output);
+
+ // Bail for revoked proxies.
+ Label bail;
+ Address targetAddr(output,
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
+ if (lir->mir()->fallible()) {
+ masm.fallibleUnboxObject(targetAddr, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ } else {
+ masm.unboxObject(targetAddr, output);
+ }
}
void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
@@ -20642,9 +20879,20 @@ void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
}
void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
- Register value = ToRegister(lir->value());
- Register output = ToRegister(lir->output());
- masm.truncate32ToWasmI31Ref(value, output);
+ if (lir->value()->isConstant()) {
+ // i31refs are often created from constants. If that's the case, we
+ // do the operation statically here. This is similar to what is done
+ // in masm.truncate32ToWasmI31Ref.
+ Register output = ToRegister(lir->output());
+ uint32_t value =
+ static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
+ uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
+ masm.movePtr(ImmWord(ptr), output);
+ } else {
+ Register value = ToRegister(lir->value());
+ Register output = ToRegister(lir->output());
+ masm.truncate32ToWasmI31Ref(value, output);
+ }
}
void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
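
[masm.truncate32ToWasmI31Ref and wasm::AnyRef::fromUint32Truncate compute the same tagged representation. A sketch of the folding, under the assumption that an i31ref keeps the low 31 bits of the input shifted up one bit with a one-bit tag; the authoritative encoding is wasm::AnyRef::fromUint32Truncate:]

    #include <cstdint>

    // Assumed i31ref encoding, for illustration only.
    uintptr_t i31RefFromUint32Truncate(uint32_t value) {
      uint32_t payload = value & 0x7FFFFFFFu;  // truncate to 31 bits
      return (uintptr_t(payload) << 1) | 0x1;  // tag as i31
    }
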
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
index 274c876e4d..282771a79e 100644
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -239,11 +239,34 @@ class CodeGenerator final : public CodeGeneratorSpecific {
uint32_t extraFormals);
void emitPushArrayAsArguments(Register tmpArgc, Register srcBaseAndArgc,
Register scratch, size_t argvSrcOffset);
- void emitPushArguments(LApplyArgsGeneric* apply, Register scratch);
- void emitPushArguments(LApplyArgsObj* apply, Register scratch);
- void emitPushArguments(LApplyArrayGeneric* apply, Register scratch);
- void emitPushArguments(LConstructArgsGeneric* construct, Register scratch);
- void emitPushArguments(LConstructArrayGeneric* construct, Register scratch);
+ void emitPushArguments(LApplyArgsGeneric* apply);
+ void emitPushArguments(LApplyArgsObj* apply);
+ void emitPushArguments(LApplyArrayGeneric* apply);
+ void emitPushArguments(LConstructArgsGeneric* construct);
+ void emitPushArguments(LConstructArrayGeneric* construct);
+
+ template <typename T>
+ void emitApplyNative(T* apply);
+ template <typename T>
+ void emitCallInvokeNativeFunction(T* apply);
+ template <typename T>
+ void emitPushNativeArguments(T* apply);
+ template <typename T>
+ void emitPushArrayAsNativeArguments(T* apply);
+ void emitPushArguments(LApplyArgsNative* apply);
+ void emitPushArguments(LApplyArgsObjNative* apply);
+ void emitPushArguments(LApplyArrayNative* apply);
+ void emitPushArguments(LConstructArgsNative* construct);
+ void emitPushArguments(LConstructArrayNative* construct);
+
+ template <typename T>
+ void emitApplyArgsGuard(T* apply);
+
+ template <typename T>
+ void emitApplyArgsObjGuard(T* apply);
+
+ template <typename T>
+ void emitApplyArrayGuard(T* apply);
template <class GetInlinedArgument>
void emitGetInlinedArgument(GetInlinedArgument* lir, Register index,
@@ -439,6 +462,7 @@ class CodeGenerator final : public CodeGeneratorSpecific {
// be mapped to an actual fuse by validateAndRegisterFuseDependencies.
enum class FuseDependencyKind {
HasSeenObjectEmulateUndefinedFuse,
+ OptimizeGetIteratorFuse,
};
// The set of fuses this code generation depends on.
@@ -449,6 +473,10 @@ class CodeGenerator final : public CodeGeneratorSpecific {
fuseDependencies += FuseDependencyKind::HasSeenObjectEmulateUndefinedFuse;
}
+ void addOptimizeGetIteratorFuseDependency() {
+ fuseDependencies += FuseDependencyKind::OptimizeGetIteratorFuse;
+ }
+
// Called during linking on main-thread: Ensures that the fuses are still
// intact, and registers a script dependency on a specific fuse before
// finishing compilation.
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
index 85008006e1..e209ace846 100644
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -253,6 +253,10 @@ bool JitRuntime::generateTrampolines(JSContext* cx) {
generateIonGenericCallStub(masm, IonGenericCallKind::Construct);
rangeRecorder.recordOffset("Trampoline: IonGenericConstruct");
+ JitSpew(JitSpew_Codegen, "# Emitting trampoline natives");
+ TrampolineNativeJitEntryOffsets nativeOffsets;
+ generateTrampolineNatives(masm, nativeOffsets, rangeRecorder);
+
Linker linker(masm);
trampolineCode_ = linker.newCode(cx, CodeKind::Other);
if (!trampolineCode_) {
@@ -264,6 +268,14 @@ bool JitRuntime::generateTrampolines(JSContext* cx) {
vtune::MarkStub(trampolineCode_, "Trampolines");
#endif
+ // Initialize TrampolineNative JitEntry array.
+ for (size_t i = 0; i < size_t(TrampolineNative::Count); i++) {
+ TrampolineNative native = TrampolineNative(i);
+ uint32_t offset = nativeOffsets[native];
+ MOZ_ASSERT(offset > 0 && offset < trampolineCode_->instructionsSize());
+ trampolineNativeJitEntries_[native] = trampolineCode_->raw() + offset;
+ }
+
return true;
}
@@ -2346,6 +2358,10 @@ static void InvalidateActivation(JS::GCContext* gcx,
JitSpew(JitSpew_IonInvalidate, "#%zu rectifier frame @ %p", frameno,
frame.fp());
break;
+ case FrameType::TrampolineNative:
+ JitSpew(JitSpew_IonInvalidate, "#%zu TrampolineNative frame @ %p",
+ frameno, frame.fp());
+ break;
case FrameType::IonICCall:
JitSpew(JitSpew_IonInvalidate, "#%zu ion IC call frame @ %p", frameno,
frame.fp());
diff --git a/js/src/jit/JSJitFrameIter.cpp b/js/src/jit/JSJitFrameIter.cpp
index 89d3de3128..fbfef8f210 100644
--- a/js/src/jit/JSJitFrameIter.cpp
+++ b/js/src/jit/JSJitFrameIter.cpp
@@ -78,7 +78,7 @@ CalleeToken JSJitFrameIter::calleeToken() const {
}
JSFunction* JSJitFrameIter::callee() const {
- MOZ_ASSERT(isScripted());
+ MOZ_ASSERT(isScripted() || isTrampolineNative());
MOZ_ASSERT(isFunctionFrame());
return CalleeTokenToFunction(calleeToken());
}
@@ -110,7 +110,7 @@ bool JSJitFrameIter::isFunctionFrame() const {
JSScript* JSJitFrameIter::script() const {
MOZ_ASSERT(isScripted());
- JSScript* script = ScriptFromCalleeToken(calleeToken());
+ JSScript* script = MaybeForwardedScriptFromCalleeToken(calleeToken());
MOZ_ASSERT(script);
return script;
}
@@ -383,6 +383,10 @@ void JSJitFrameIter::dump() const {
fprintf(stderr, " Rectifier frame\n");
fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
break;
+ case FrameType::TrampolineNative:
+ fprintf(stderr, " TrampolineNative frame\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
case FrameType::IonICCall:
fprintf(stderr, " Ion IC call\n");
fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
@@ -707,47 +711,47 @@ void JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame) {
* |
* ^--- WasmToJSJit <---- (other wasm frames, not handled by this iterator)
* |
- * ^--- Arguments Rectifier
- * | ^
- * | |
- * | ^--- Ion
- * | |
- * | ^--- Baseline Stub <---- Baseline
- * | |
- * | ^--- WasmToJSJit <--- (other wasm frames)
- * | |
- * | ^--- Entry Frame (CppToJSJit)
+ * ^--- Entry Frame (BaselineInterpreter) (unwrapped)
* |
- * ^--- Entry Frame (CppToJSJit)
+ * ^--- Arguments Rectifier (unwrapped)
+ * |
+ * ^--- Trampoline Native (unwrapped)
* |
- * ^--- Entry Frame (BaselineInterpreter)
- * | ^
- * | |
- * | ^--- Ion
- * | |
- * | ^--- Baseline Stub <---- Baseline
- * | |
- * | ^--- WasmToJSJit <--- (other wasm frames)
- * | |
- * | ^--- Entry Frame (CppToJSJit)
- * | |
- * | ^--- Arguments Rectifier
+ * ^--- Entry Frame (CppToJSJit)
*
* NOTE: Keep this in sync with JitRuntime::generateProfilerExitFrameTailStub!
*/
- // Unwrap baseline interpreter entry frame.
- if (frame->prevType() == FrameType::BaselineInterpreterEntry) {
- frame = GetPreviousRawFrame<BaselineInterpreterEntryFrameLayout*>(frame);
- }
+ while (true) {
+ // Unwrap baseline interpreter entry frame.
+ if (frame->prevType() == FrameType::BaselineInterpreterEntry) {
+ frame = GetPreviousRawFrame<BaselineInterpreterEntryFrameLayout*>(frame);
+ continue;
+ }
+
+ // Unwrap rectifier frames.
+ if (frame->prevType() == FrameType::Rectifier) {
+ frame = GetPreviousRawFrame<RectifierFrameLayout*>(frame);
+ MOZ_ASSERT(frame->prevType() == FrameType::IonJS ||
+ frame->prevType() == FrameType::BaselineStub ||
+ frame->prevType() == FrameType::TrampolineNative ||
+ frame->prevType() == FrameType::WasmToJSJit ||
+ frame->prevType() == FrameType::CppToJSJit);
+ continue;
+ }
- // Unwrap rectifier frames.
- if (frame->prevType() == FrameType::Rectifier) {
- frame = GetPreviousRawFrame<RectifierFrameLayout*>(frame);
- MOZ_ASSERT(frame->prevType() == FrameType::IonJS ||
- frame->prevType() == FrameType::BaselineStub ||
- frame->prevType() == FrameType::WasmToJSJit ||
- frame->prevType() == FrameType::CppToJSJit);
+ // Unwrap TrampolineNative frames.
+ if (frame->prevType() == FrameType::TrampolineNative) {
+ frame = GetPreviousRawFrame<TrampolineNativeFrameLayout*>(frame);
+ MOZ_ASSERT(frame->prevType() == FrameType::IonJS ||
+ frame->prevType() == FrameType::BaselineStub ||
+ frame->prevType() == FrameType::Rectifier ||
+ frame->prevType() == FrameType::WasmToJSJit ||
+ frame->prevType() == FrameType::CppToJSJit);
+ continue;
+ }
+
+ break;
}
FrameType prevType = frame->prevType();
@@ -773,24 +777,31 @@ void JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame) {
}
case FrameType::WasmToJSJit:
- // No previous js jit frame, this is a transition frame, used to
- // pass a wasm iterator the correct value of FP.
+ // No previous JS JIT frame. Set fp_ to nullptr to indicate the
+ // JSJitProfilingFrameIterator is done(). Also set wasmCallerFP_ so that
+ // the caller can pass it to a Wasm frame iterator.
resumePCinCurrentFrame_ = nullptr;
- fp_ = GetPreviousRawFrame<uint8_t*>(frame);
+ fp_ = nullptr;
type_ = FrameType::WasmToJSJit;
- MOZ_ASSERT(!done());
+ MOZ_ASSERT(!wasmCallerFP_);
+ wasmCallerFP_ = GetPreviousRawFrame<uint8_t*>(frame);
+ MOZ_ASSERT(wasmCallerFP_);
+ MOZ_ASSERT(done());
return;
case FrameType::CppToJSJit:
- // No previous frame, set to nullptr to indicate that
+ // No previous JS JIT frame. Set fp_ to nullptr to indicate the
// JSJitProfilingFrameIterator is done().
resumePCinCurrentFrame_ = nullptr;
fp_ = nullptr;
type_ = FrameType::CppToJSJit;
+ MOZ_ASSERT(!wasmCallerFP_);
+ MOZ_ASSERT(done());
return;
case FrameType::BaselineInterpreterEntry:
case FrameType::Rectifier:
+ case FrameType::TrampolineNative:
case FrameType::Exit:
case FrameType::Bailout:
case FrameType::JSJitToWasm:
diff --git a/js/src/jit/JSJitFrameIter.h b/js/src/jit/JSJitFrameIter.h
index d40a533a20..03fd06852e 100644
--- a/js/src/jit/JSJitFrameIter.h
+++ b/js/src/jit/JSJitFrameIter.h
@@ -73,6 +73,10 @@ enum class FrameType {
// wasm, and is a special kind of exit frame that doesn't have the exit
// footer. From the point of view of the jit, it can be skipped as an exit.
JSJitToWasm,
+
+ // Frame for a TrampolineNative, a JS builtin implemented with a JIT
+ // trampoline. See jit/TrampolineNatives.h.
+ TrampolineNative,
};
enum class ReadFrameArgsBehavior {
@@ -173,6 +177,9 @@ class JSJitFrameIter {
return type_ == FrameType::BaselineInterpreterEntry;
}
bool isRectifier() const { return type_ == FrameType::Rectifier; }
+ bool isTrampolineNative() const {
+ return type_ == FrameType::TrampolineNative;
+ }
bool isBareExit() const;
bool isUnwoundJitExit() const;
template <typename T>
@@ -263,6 +270,7 @@ class JitcodeGlobalTable;
class JSJitProfilingFrameIterator {
uint8_t* fp_;
+ uint8_t* wasmCallerFP_ = nullptr;
// See JS::ProfilingFrameIterator::endStackAddress_ comment.
void* endStackAddress_ = nullptr;
FrameType type_;
@@ -290,6 +298,11 @@ class JSJitProfilingFrameIterator {
MOZ_ASSERT(!done());
return fp_;
}
+ void* wasmCallerFP() const {
+ MOZ_ASSERT(done());
+ MOZ_ASSERT(bool(wasmCallerFP_) == (type_ == FrameType::WasmToJSJit));
+ return wasmCallerFP_;
+ }
inline JitFrameLayout* framePtr() const;
void* stackAddress() const { return fp(); }
FrameType frameType() const {
@@ -491,6 +504,42 @@ class SnapshotIterator {
Value read() { return allocationValue(readAllocation()); }
+ int32_t readInt32() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isInt32());
+ return val.toInt32();
+ }
+
+ double readNumber() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isNumber());
+ return val.toNumber();
+ }
+
+ JSString* readString() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isString());
+ return val.toString();
+ }
+
+ JS::BigInt* readBigInt() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isBigInt());
+ return val.toBigInt();
+ }
+
+ JSObject* readObject() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isObject());
+ return &val.toObject();
+ }
+
+ JS::GCCellPtr readGCCellPtr() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isGCThing());
+ return val.toGCCellPtr();
+ }
+
// Read the |Normal| value unless it is not available and that the snapshot
// provides a |Default| value. This is useful to avoid invalidations of the
// frame while we are only interested in a few properties which are provided
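
[The new typed readers tighten call sites, notably in Recover.cpp per the diffstat, that previously read a Value and unpacked it by hand. A hypothetical recover instruction using them; only readString()/readInt32() and the release-assert behaviour come from the patch, the rest is illustrative:]

    bool RMyCharCodeAt::recover(JSContext* cx, SnapshotIterator& iter) const {
      JSString* str = iter.readString();  // release-asserts val.isString()
      int32_t index = iter.readInt32();   // release-asserts val.isInt32()

      char16_t ch;
      if (!str->getChar(cx, index, &ch)) {
        return false;
      }
      iter.storeInstructionResult(Int32Value(ch));
      return true;
    }
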
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
index 176b988e05..45ac1f5def 100644
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -83,6 +83,27 @@ static uint32_t NumArgAndLocalSlots(const InlineFrameIterator& frame) {
return CountArgSlots(script, frame.maybeCalleeTemplate()) + script->nfixed();
}
+static TrampolineNative TrampolineNativeForFrame(
+ JSRuntime* rt, TrampolineNativeFrameLayout* layout) {
+ JSFunction* nativeFun = CalleeTokenToFunction(layout->calleeToken());
+ MOZ_ASSERT(nativeFun->isBuiltinNative());
+ void** jitEntry = nativeFun->nativeJitEntry();
+ return rt->jitRuntime()->trampolineNativeForJitEntry(jitEntry);
+}
+
+static void UnwindTrampolineNativeFrame(JSRuntime* rt,
+ const JSJitFrameIter& frame) {
+ auto* layout = (TrampolineNativeFrameLayout*)frame.fp();
+ TrampolineNative native = TrampolineNativeForFrame(rt, layout);
+ switch (native) {
+ case TrampolineNative::ArraySort:
+ layout->getFrameData<ArraySortData>()->freeMallocData();
+ break;
+ case TrampolineNative::Count:
+ MOZ_CRASH("Invalid value");
+ }
+}
+
static void CloseLiveIteratorIon(JSContext* cx,
const InlineFrameIterator& frame,
const TryNote* tn) {
@@ -739,7 +760,7 @@ void HandleException(ResumeFromException* rfe) {
// JIT code can enter same-compartment realms, so reset cx->realm to
// this frame's realm.
- if (frame.isScripted()) {
+ if (frame.isScripted() || frame.isTrampolineNative()) {
cx->setRealmForJitExceptionHandler(iter.realm());
}
@@ -809,6 +830,8 @@ void HandleException(ResumeFromException* rfe) {
if (rfe->kind == ExceptionResumeKind::ForcedReturnBaseline) {
return;
}
+ } else if (frame.isTrampolineNative()) {
+ UnwindTrampolineNativeFrame(cx->runtime(), frame);
}
prevJitFrame = frame.current();
@@ -910,12 +933,15 @@ static inline uintptr_t ReadAllocation(const JSJitFrameIter& frame,
static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
JitFrameLayout* layout) {
- // Trace |this| and any extra actual arguments for an Ion frame. Tracing
- // of formal arguments is taken care of by the frame's safepoint/snapshot,
- // except when the script might have lazy arguments or rest, in which case
- // we trace them as well. We also have to trace formals if we have a
- // LazyLink frame or an InterpreterStub frame or a special JSJit to wasm
- // frame (since wasm doesn't use snapshots).
+ // Trace |this| and the actual and formal arguments of a JIT frame.
+ //
+ // Tracing of formal arguments of an Ion frame is taken care of by the frame's
+ // safepoint/snapshot. We skip tracing formal arguments if the script doesn't
+ // use |arguments| or rest, because the register allocator can spill values to
+ // argument slots in this case.
+ //
+ // For other frames such as LazyLink frames or InterpreterStub frames, we
+ // always trace all actual and formal arguments.
if (!CalleeTokenIsFunction(layout->calleeToken())) {
return;
@@ -927,8 +953,7 @@ static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
size_t numArgs = std::max(layout->numActualArgs(), numFormals);
size_t firstArg = 0;
- if (frame.type() != FrameType::JSJitToWasm &&
- !frame.isExitFrameLayout<CalledFromJitExitFrameLayout>() &&
+ if (frame.isIonScripted() &&
!fun->nonLazyScript()->mayReadFrameArgsDirectly()) {
firstArg = numFormals;
}
@@ -936,17 +961,17 @@ static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
Value* argv = layout->thisAndActualArgs();
// Trace |this|.
- TraceRoot(trc, argv, "ion-thisv");
+ TraceRoot(trc, argv, "jit-thisv");
// Trace arguments. Note + 1 for thisv.
for (size_t i = firstArg; i < numArgs; i++) {
- TraceRoot(trc, &argv[i + 1], "ion-argv");
+ TraceRoot(trc, &argv[i + 1], "jit-argv");
}
// Always trace the new.target from the frame. It's not in the snapshots.
// +1 to pass |this|
if (CalleeTokenIsConstructing(layout->calleeToken())) {
- TraceRoot(trc, &argv[1 + numArgs], "ion-newTarget");
+ TraceRoot(trc, &argv[1 + numArgs], "jit-newTarget");
}
}
@@ -1397,6 +1422,22 @@ static void TraceJSJitToWasmFrame(JSTracer* trc, const JSJitFrameIter& frame) {
TraceThisAndArguments(trc, frame, layout);
}
+static void TraceTrampolineNativeFrame(JSTracer* trc,
+ const JSJitFrameIter& frame) {
+ auto* layout = (TrampolineNativeFrameLayout*)frame.fp();
+ layout->replaceCalleeToken(TraceCalleeToken(trc, layout->calleeToken()));
+ TraceThisAndArguments(trc, frame, layout);
+
+ TrampolineNative native = TrampolineNativeForFrame(trc->runtime(), layout);
+ switch (native) {
+ case TrampolineNative::ArraySort:
+ layout->getFrameData<ArraySortData>()->trace(trc);
+ break;
+ case TrampolineNative::Count:
+ MOZ_CRASH("Invalid value");
+ }
+}
+
static void TraceJitActivation(JSTracer* trc, JitActivation* activation) {
#ifdef CHECK_OSIPOINT_REGISTERS
if (JitOptions.checkOsiPointRegisters) {
@@ -1439,6 +1480,9 @@ static void TraceJitActivation(JSTracer* trc, JitActivation* activation) {
case FrameType::Rectifier:
TraceRectifierFrame(trc, jitFrame);
break;
+ case FrameType::TrampolineNative:
+ TraceTrampolineNativeFrame(trc, jitFrame);
+ break;
case FrameType::IonICCall:
TraceIonICCallFrame(trc, jitFrame);
break;
diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h
index ab882e7986..47c176492b 100644
--- a/js/src/jit/JitFrames.h
+++ b/js/src/jit/JitFrames.h
@@ -299,6 +299,17 @@ class RectifierFrameLayout : public JitFrameLayout {
static inline size_t Size() { return sizeof(RectifierFrameLayout); }
};
+class TrampolineNativeFrameLayout : public JitFrameLayout {
+ public:
+ static inline size_t Size() { return sizeof(TrampolineNativeFrameLayout); }
+
+ template <typename T>
+ T* getFrameData() {
+ uint8_t* raw = reinterpret_cast<uint8_t*>(this) - sizeof(T);
+ return reinterpret_cast<T*>(raw);
+ }
+};
+
class WasmToJSJitFrameLayout : public JitFrameLayout {
public:
static inline size_t Size() { return sizeof(WasmToJSJitFrameLayout); }
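
[Editor's note: getFrameData relies on the trampoline reserving sizeof(T) of
stack immediately below the frame layout, so the data's address is simply
|this| minus sizeof(T). A standalone sketch of that arithmetic with toy types;
FrameData stands in for e.g. ArraySortData.]

    #include <cstdint>

    struct FrameData { int state; };  // stand-in for ArraySortData

    struct Layout {  // stand-in for TrampolineNativeFrameLayout
      // The stack grows down, so data pushed after the frame layout sits
      // at lower addresses than |this|.
      template <typename T>
      T* getFrameData() {
        uint8_t* raw = reinterpret_cast<uint8_t*>(this) - sizeof(T);
        return reinterpret_cast<T*>(raw);
      }
    };
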
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
index e9d389cf60..053cf868a7 100644
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -447,7 +447,8 @@ void DefaultJitOptions::resetNormalIonWarmUpThreshold() {
void DefaultJitOptions::maybeSetWriteProtectCode(bool val) {
#ifdef JS_USE_APPLE_FAST_WX
- // On Apple Silicon we always use pthread_jit_write_protect_np.
+  // On Apple Silicon we always use pthread_jit_write_protect_np or, on iOS,
+  // the be_memory_inline_jit_restrict_* functions.
MOZ_ASSERT(!writeProtectCode);
#else
writeProtectCode = val;
diff --git a/js/src/jit/JitRuntime.h b/js/src/jit/JitRuntime.h
index 7d038ed0e2..383efca437 100644
--- a/js/src/jit/JitRuntime.h
+++ b/js/src/jit/JitRuntime.h
@@ -27,6 +27,7 @@
#include "jit/JitCode.h"
#include "jit/JitHints.h"
#include "jit/shared/Assembler-shared.h"
+#include "jit/TrampolineNatives.h"
#include "js/AllocPolicy.h"
#include "js/ProfilingFrameIterator.h"
#include "js/TypeDecls.h"
@@ -234,6 +235,13 @@ class JitRuntime {
MainThreadData<IonCompileTaskList> ionLazyLinkList_;
MainThreadData<size_t> ionLazyLinkListSize_{0};
+ // Pointer to trampoline code for each TrampolineNative. The JSFunction has
+ // a JitEntry pointer that points to an item in this array.
+ using TrampolineNativeJitEntryArray =
+ mozilla::EnumeratedArray<TrampolineNative, void*,
+ size_t(TrampolineNative::Count)>;
+ TrampolineNativeJitEntryArray trampolineNativeJitEntries_{};
+
#ifdef DEBUG
// Flag that can be set from JIT code to indicate it's invalid to call
// arbitrary JS code in a particular region. This is checked in RunScript.
@@ -293,6 +301,14 @@ class JitRuntime {
void generateBaselineInterpreterEntryTrampoline(MacroAssembler& masm);
void generateInterpreterEntryTrampoline(MacroAssembler& masm);
+ using TrampolineNativeJitEntryOffsets =
+ mozilla::EnumeratedArray<TrampolineNative, uint32_t,
+ size_t(TrampolineNative::Count)>;
+ void generateTrampolineNatives(MacroAssembler& masm,
+ TrampolineNativeJitEntryOffsets& offsets,
+ PerfSpewerRangeRecorder& rangeRecorder);
+ uint32_t generateArraySortTrampoline(MacroAssembler& masm);
+
void bindLabelToOffset(Label* label, uint32_t offset) {
MOZ_ASSERT(!trampolineCode_);
label->bind(offset);
@@ -418,6 +434,20 @@ class JitRuntime {
return trampolineCode(ionGenericCallStubOffset_[kind]);
}
+ void** trampolineNativeJitEntry(TrampolineNative native) {
+ void** jitEntry = &trampolineNativeJitEntries_[native];
+ MOZ_ASSERT(*jitEntry >= trampolineCode_->raw());
+ MOZ_ASSERT(*jitEntry <
+ trampolineCode_->raw() + trampolineCode_->instructionsSize());
+ return jitEntry;
+ }
+ TrampolineNative trampolineNativeForJitEntry(void** entry) {
+ MOZ_RELEASE_ASSERT(entry >= trampolineNativeJitEntries_.begin());
+ size_t index = entry - trampolineNativeJitEntries_.begin();
+ MOZ_RELEASE_ASSERT(index < size_t(TrampolineNative::Count));
+ return TrampolineNative(index);
+ }
+
bool hasJitcodeGlobalTable() const { return jitcodeGlobalTable_ != nullptr; }
JitcodeGlobalTable* getJitcodeGlobalTable() {
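
[Editor's note: the per-runtime table gives each trampoline native one slot,
and a function's JitEntry points into that table, so mapping an entry back to
its TrampolineNative is plain pointer arithmetic. A self-contained sketch of
the round trip, with a plain array in place of mozilla::EnumeratedArray.]

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    enum class TrampolineNative : uint16_t { ArraySort, Count };

    // One code pointer per native; plays the role of
    // trampolineNativeJitEntries_.
    static void* table[size_t(TrampolineNative::Count)];

    void** entryFor(TrampolineNative native) {
      return &table[size_t(native)];
    }

    TrampolineNative nativeFor(void** entry) {
      assert(entry >= &table[0]);
      size_t index = size_t(entry - &table[0]);  // slot index is the enum value
      assert(index < size_t(TrampolineNative::Count));
      return TrampolineNative(index);
    }
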
diff --git a/js/src/jit/LIROps.yaml b/js/src/jit/LIROps.yaml
index f13c4b0745..880e756f74 100644
--- a/js/src/jit/LIROps.yaml
+++ b/js/src/jit/LIROps.yaml
@@ -632,6 +632,21 @@
- name: ConstructArrayGeneric
gen_boilerplate: false
+- name: ApplyArgsNative
+ gen_boilerplate: false
+
+- name: ApplyArgsObjNative
+ gen_boilerplate: false
+
+- name: ApplyArrayNative
+ gen_boilerplate: false
+
+- name: ConstructArgsNative
+ gen_boilerplate: false
+
+- name: ConstructArrayNative
+ gen_boilerplate: false
+
- name: TestIAndBranch
gen_boilerplate: false
@@ -2189,7 +2204,7 @@
mir_op: ClampToUint8
- name: LoadScriptedProxyHandler
- result_type: BoxedValue
+ result_type: WordSized
operands:
object: WordSized
mir_op: true
@@ -3694,6 +3709,7 @@
result_type: WordSized
operands:
object: WordSized
+ mir_op: true
- name: GuardHasGetterSetter
operands:
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index b0007a114d..f7b898f240 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -654,12 +654,23 @@ void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- LApplyArgsGeneric* lir = new (alloc()) LApplyArgsGeneric(
- useFixedAtStart(apply->getFunction(), CallTempReg3),
- useFixedAtStart(apply->getArgc(), CallTempReg0),
- useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg1), // object register
- tempFixed(CallTempReg2)); // stack counter register
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+ auto argc = useFixedAtStart(apply->getArgc(), CallTempReg0);
+ auto thisValue =
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
+ auto tempObj = tempFixed(CallTempReg1); // object register
+ auto tempCopy = tempFixed(CallTempReg2); // copy register
+
+ auto* target = apply->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ lir = new (alloc())
+ LApplyArgsNative(function, argc, thisValue, tempObj, tempCopy);
+ } else {
+ lir = new (alloc())
+ LApplyArgsGeneric(function, argc, thisValue, tempObj, tempCopy);
+ }
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, apply->bailoutKind());
@@ -675,12 +686,23 @@ void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- LApplyArgsObj* lir = new (alloc()) LApplyArgsObj(
- useFixedAtStart(apply->getFunction(), CallTempReg3),
- useFixedAtStart(apply->getArgsObj(), CallTempReg0),
- useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg1), // object register
- tempFixed(CallTempReg2)); // stack counter register
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+ auto argsObj = useFixedAtStart(apply->getArgsObj(), CallTempReg0);
+ auto thisValue =
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
+ auto tempObj = tempFixed(CallTempReg1); // object register
+ auto tempCopy = tempFixed(CallTempReg2); // copy register
+
+ auto* target = apply->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ lir = new (alloc())
+ LApplyArgsObjNative(function, argsObj, thisValue, tempObj, tempCopy);
+ } else {
+ lir = new (alloc())
+ LApplyArgsObj(function, argsObj, thisValue, tempObj, tempCopy);
+ }
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, apply->bailoutKind());
@@ -696,12 +718,23 @@ void LIRGenerator::visitApplyArray(MApplyArray* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- LApplyArrayGeneric* lir = new (alloc()) LApplyArrayGeneric(
- useFixedAtStart(apply->getFunction(), CallTempReg3),
- useFixedAtStart(apply->getElements(), CallTempReg0),
- useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg1), // object register
- tempFixed(CallTempReg2)); // stack counter register
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+ auto elements = useFixedAtStart(apply->getElements(), CallTempReg0);
+ auto thisValue =
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
+ auto tempObj = tempFixed(CallTempReg1); // object register
+ auto tempCopy = tempFixed(CallTempReg2); // copy register
+
+ auto* target = apply->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ lir = new (alloc())
+ LApplyArrayNative(function, elements, thisValue, tempObj, tempCopy);
+ } else {
+ lir = new (alloc())
+ LApplyArrayGeneric(function, elements, thisValue, tempObj, tempCopy);
+ }
// Bailout is needed in the case of too many values in the array, or empty
// space at the end of the array.
@@ -721,12 +754,26 @@ void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto* lir = new (alloc()) LConstructArgsGeneric(
- useFixedAtStart(mir->getFunction(), CallTempReg3),
- useFixedAtStart(mir->getArgc(), CallTempReg0),
- useFixedAtStart(mir->getNewTarget(), CallTempReg1),
- useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg2));
+ auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
+ auto argc = useFixedAtStart(mir->getArgc(), CallTempReg0);
+ auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
+ auto temp = tempFixed(CallTempReg2);
+
+ auto* target = mir->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ auto temp2 = tempFixed(CallTempReg4);
+
+ lir = new (alloc())
+ LConstructArgsNative(function, argc, newTarget, temp, temp2);
+ } else {
+ auto thisValue =
+ useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);
+
+ lir = new (alloc())
+ LConstructArgsGeneric(function, argc, newTarget, thisValue, temp);
+ }
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, mir->bailoutKind());
@@ -745,12 +792,26 @@ void LIRGenerator::visitConstructArray(MConstructArray* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto* lir = new (alloc()) LConstructArrayGeneric(
- useFixedAtStart(mir->getFunction(), CallTempReg3),
- useFixedAtStart(mir->getElements(), CallTempReg0),
- useFixedAtStart(mir->getNewTarget(), CallTempReg1),
- useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg2));
+ auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
+ auto elements = useFixedAtStart(mir->getElements(), CallTempReg0);
+ auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
+ auto temp = tempFixed(CallTempReg2);
+
+ auto* target = mir->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ auto temp2 = tempFixed(CallTempReg4);
+
+ lir = new (alloc())
+ LConstructArrayNative(function, elements, newTarget, temp, temp2);
+ } else {
+ auto thisValue =
+ useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);
+
+ lir = new (alloc())
+ LConstructArrayGeneric(function, elements, newTarget, thisValue, temp);
+ }
// Bailout is needed in the case of too many values in the array, or empty
// space at the end of the array.
@@ -3241,7 +3302,9 @@ void LIRGenerator::visitWasmAnyRefFromJSString(MWasmAnyRefFromJSString* ins) {
}
void LIRGenerator::visitWasmNewI31Ref(MWasmNewI31Ref* ins) {
- LWasmNewI31Ref* lir = new (alloc()) LWasmNewI31Ref(useRegister(ins->input()));
+ // If it's a constant, it will be put directly into the register.
+ LWasmNewI31Ref* lir =
+ new (alloc()) LWasmNewI31Ref(useRegisterOrConstant(ins->input()));
define(lir, ins);
}
@@ -4686,7 +4749,8 @@ void LIRGenerator::visitLoadScriptedProxyHandler(
MLoadScriptedProxyHandler* ins) {
LLoadScriptedProxyHandler* lir = new (alloc())
LLoadScriptedProxyHandler(useRegisterAtStart(ins->object()));
- defineBox(lir, ins);
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
}
void LIRGenerator::visitIdToStringOrSymbol(MIdToStringOrSymbol* ins) {
@@ -6750,7 +6814,11 @@ void LIRGenerator::visitLoadWrapperTarget(MLoadWrapperTarget* ins) {
MDefinition* object = ins->object();
MOZ_ASSERT(object->type() == MIRType::Object);
- define(new (alloc()) LLoadWrapperTarget(useRegisterAtStart(object)), ins);
+ auto* lir = new (alloc()) LLoadWrapperTarget(useRegisterAtStart(object));
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
}
void LIRGenerator::visitGuardHasGetterSetter(MGuardHasGetterSetter* ins) {
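
[Editor's note: all five apply/construct visitors above (ApplyArgs,
ApplyArgsObj, ApplyArray, ConstructArgs, ConstructArray) choose between the
new *Native and the existing *Generic LIR ops with the same test. A
hypothetical helper, not part of the patch, spelling out the shared
predicate:]

    // Calls to a known native without a JIT entry can use the direct
    // trampoline-native call path; everything else takes the generic
    // (pushed-frame) path.
    static bool CanLowerToNativeApply(WrappedFunction* target) {
      return target && target->isNativeWithoutJitEntry();
    }
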
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
index c6daecb166..a74406567b 100644
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -689,7 +689,62 @@ MDefinition* MTest::foldsNeedlessControlFlow(TempAllocator& alloc) {
return MGoto::New(alloc, ifTrue());
}
+// If a test is dominated by either the true or false path of a previous test of
+// the same condition, then the test is redundant and can be converted into a
+// goto true or goto false, respectively.
+MDefinition* MTest::foldsRedundantTest(TempAllocator& alloc) {
+ MBasicBlock* myBlock = this->block();
+ MDefinition* originalInput = getOperand(0);
+
+ // Handle single and double negatives. This ensures that we do not miss a
+ // folding opportunity due to a condition being inverted.
+ MDefinition* newInput = input();
+ bool inverted = false;
+ if (originalInput->isNot()) {
+ newInput = originalInput->toNot()->input();
+ inverted = true;
+ if (originalInput->toNot()->input()->isNot()) {
+ newInput = originalInput->toNot()->input()->toNot()->input();
+ inverted = false;
+ }
+ }
+
+ // The specific order of traversal does not matter. If there are multiple
+ // dominating redundant tests, they will either agree on direction (in which
+ // case we will prune the same way regardless of order), or they will
+ // disagree, in which case we will eventually be marked entirely dead by the
+ // folding of the redundant parent.
+ for (MUseIterator i(newInput->usesBegin()), e(newInput->usesEnd()); i != e;
+ ++i) {
+ if (!i->consumer()->isDefinition()) {
+ continue;
+ }
+ if (!i->consumer()->toDefinition()->isTest()) {
+ continue;
+ }
+ MTest* otherTest = i->consumer()->toDefinition()->toTest();
+ if (otherTest == this) {
+ continue;
+ }
+
+ if (otherTest->ifFalse()->dominates(myBlock)) {
+ // This test cannot be true, so fold to a goto false.
+ return MGoto::New(alloc, inverted ? ifTrue() : ifFalse());
+ }
+ if (otherTest->ifTrue()->dominates(myBlock)) {
+ // This test cannot be false, so fold to a goto true.
+ return MGoto::New(alloc, inverted ? ifFalse() : ifTrue());
+ }
+ }
+
+ return nullptr;
+}
+
MDefinition* MTest::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsRedundantTest(alloc)) {
+ return def;
+ }
+
if (MDefinition* def = foldsDoubleNegation(alloc)) {
return def;
}
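
[Editor's note: the negation handling in foldsRedundantTest is a parity count
over at most two MNot wrappers. A toy sketch of that normalization, assuming
MIR-like isNot()/toNot()/input() accessors:]

    // Strip single or double negation from a test condition, recording
    // whether the surviving condition is inverted relative to the original.
    static MDefinition* NormalizeCondition(MDefinition* input, bool* inverted) {
      *inverted = false;
      if (input->isNot()) {            // !x: branch sense flips
        input = input->toNot()->input();
        *inverted = true;
        if (input->isNot()) {          // !!x: flips back
          input = input->toNot()->input();
          *inverted = false;
        }
      }
      return input;
    }
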
@@ -7187,6 +7242,16 @@ AliasSet MLoadWrapperTarget::getAliasSet() const {
return AliasSet::Load(AliasSet::Any);
}
+bool MLoadWrapperTarget::congruentTo(const MDefinition* ins) const {
+ if (!ins->isLoadWrapperTarget()) {
+ return false;
+ }
+ if (ins->toLoadWrapperTarget()->fallible() != fallible()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
AliasSet MGuardHasGetterSetter::getAliasSet() const {
return AliasSet::Load(AliasSet::ObjectFields);
}
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index d882665a65..c672092f04 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -1890,6 +1890,7 @@ class MTest : public MAryControlInstruction<1, 2>, public TestPolicy::Data {
MDefinition* foldsConstant(TempAllocator& alloc);
MDefinition* foldsTypes(TempAllocator& alloc);
MDefinition* foldsNeedlessControlFlow(TempAllocator& alloc);
+ MDefinition* foldsRedundantTest(TempAllocator& alloc);
MDefinition* foldsTo(TempAllocator& alloc) override;
#ifdef DEBUG
@@ -2309,7 +2310,7 @@ class WrappedFunction : public TempObject {
return nativeFun_->nativeUnchecked();
}
bool hasJitInfo() const {
- return flags_.isBuiltinNative() && nativeFun_->jitInfoUnchecked();
+ return flags_.canHaveJitInfo() && nativeFun_->jitInfoUnchecked();
}
const JSJitInfo* jitInfo() const {
MOZ_ASSERT(hasJitInfo());
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
index 7f0df52742..78ab989221 100644
--- a/js/src/jit/MIROps.yaml
+++ b/js/src/jit/MIROps.yaml
@@ -539,7 +539,8 @@
- name: LoadScriptedProxyHandler
operands:
object: Object
- result_type: Value
+ result_type: Object
+ guard: true
congruent_to: if_operands_equal
alias_set: none
@@ -1421,8 +1422,6 @@
index: Int32
type_policy: none
alias_set: custom
- # By default no, unless built as a recovered instruction.
- can_recover: custom
# Load the function length. Bails for functions with lazy scripts or a
# resolved "length" property.
@@ -2810,13 +2809,16 @@
alias_set: none
# Load the target object from a proxy wrapper. The target is stored in the
-# proxy object's private slot.
+# proxy object's private slot. This operation is fallible if the proxy can
+# be revoked.
- name: LoadWrapperTarget
operands:
object: Object
+ arguments:
+ fallible: bool
result_type: Object
movable: true
- congruent_to: if_operands_equal
+ congruent_to: custom
# Can't use |AliasSet::None| because the target changes on navigation.
# TODO: Investigate using a narrower or a custom alias set.
alias_set: custom
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 3b094d49dc..9fc4b96830 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -3169,6 +3169,8 @@ void MacroAssembler::emitMegamorphicCachedSetSlot(
passABIArg(scratch2);
callWithABI<Fn, NativeObject::growSlotsPure>();
storeCallPointerResult(scratch2);
+
+ MOZ_ASSERT(!save.has(scratch2));
PopRegsInMask(save);
branchIfFalseBool(scratch2, &cacheMiss);
@@ -7803,6 +7805,24 @@ void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
}
+void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output) {
+ // Get initial length value.
+ unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
+ output);
+
+#ifdef DEBUG
+ // Assert length hasn't been overridden.
+ Label ok;
+ branchTest32(Assembler::Zero, output,
+ Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), &ok);
+ assumeUnreachable("arguments object length has been overridden");
+ bind(&ok);
+#endif
+
+ // Shift out arguments length and return it.
+ rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
+}
+
void MacroAssembler::branchTestArgumentsObjectFlags(Register obj, Register temp,
uint32_t flags,
Condition cond,
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 361de3ac5f..114aaa47d7 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -5291,6 +5291,7 @@ class MacroAssembler : public MacroAssemblerSpecific {
Label* fail);
void loadArgumentsObjectLength(Register obj, Register output, Label* fail);
+ void loadArgumentsObjectLength(Register obj, Register output);
void branchTestArgumentsObjectFlags(Register obj, Register temp,
uint32_t flags, Condition cond,
diff --git a/js/src/jit/PerfSpewer.cpp b/js/src/jit/PerfSpewer.cpp
index 81954f3d92..c9d9cc8d88 100644
--- a/js/src/jit/PerfSpewer.cpp
+++ b/js/src/jit/PerfSpewer.cpp
@@ -23,7 +23,7 @@
# define gettid() static_cast<pid_t>(syscall(__NR_gettid))
#endif
-#if defined(JS_ION_PERF) && (defined(ANDROID) || defined(XP_MACOSX))
+#if defined(JS_ION_PERF) && (defined(ANDROID) || defined(XP_DARWIN))
# include <limits.h>
# include <stdlib.h>
# include <unistd.h>
@@ -42,7 +42,7 @@ char* get_current_dir_name() {
}
#endif
-#if defined(JS_ION_PERF) && defined(XP_MACOSX)
+#if defined(JS_ION_PERF) && defined(XP_DARWIN)
# include <pthread.h>
# include <unistd.h>
@@ -128,7 +128,7 @@ static uint64_t GetMonotonicTimestamp() {
return TimeStamp::Now().RawClockMonotonicNanosecondsSinceBoot();
# elif XP_WIN
return TimeStamp::Now().RawQueryPerformanceCounterValue().value();
-# elif XP_MACOSX
+# elif XP_DARWIN
return TimeStamp::Now().RawMachAbsoluteTimeNanoseconds();
# else
MOZ_CRASH("no timestamp");
diff --git a/js/src/jit/ProcessExecutableMemory.cpp b/js/src/jit/ProcessExecutableMemory.cpp
index 830d15f7fb..0c00b17c73 100644
--- a/js/src/jit/ProcessExecutableMemory.cpp
+++ b/js/src/jit/ProcessExecutableMemory.cpp
@@ -46,6 +46,10 @@
# include <valgrind/valgrind.h>
#endif
+#if defined(XP_IOS)
+# include <BrowserEngineCore/BEMemory.h>
+#endif
+
using namespace js;
using namespace js::jit;
@@ -990,11 +994,19 @@ bool js::jit::ReprotectRegion(void* start, size_t size,
#ifdef JS_USE_APPLE_FAST_WX
void js::jit::AutoMarkJitCodeWritableForThread::markExecutable(
bool executable) {
+# if defined(XP_IOS)
+ if (executable) {
+ be_memory_inline_jit_restrict_rwx_to_rx_with_witness();
+ } else {
+ be_memory_inline_jit_restrict_rwx_to_rw_with_witness();
+ }
+# else
if (__builtin_available(macOS 11.0, *)) {
pthread_jit_write_protect_np(executable);
} else {
MOZ_CRASH("pthread_jit_write_protect_np must be available");
}
+# endif
}
#endif
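
[Editor's note: on macOS the fast W^X path flips the current thread's JIT
region between writable and executable; on iOS the BrowserEngineCore
equivalents are used instead. A hedged sketch of an RAII guard built on the
same primitive; AutoMarkJitCodeWritableForThread is the real mechanism.]

    #include <pthread.h>  // pthread_jit_write_protect_np (macOS 11+)

    class ScopedJitWritable {
     public:
      ScopedJitWritable() { pthread_jit_write_protect_np(0); }   // RW
      ~ScopedJitWritable() { pthread_jit_write_protect_np(1); }  // back to RX
    };
    // On iOS the equivalent calls are
    // be_memory_inline_jit_restrict_rwx_to_rw_with_witness() and
    // be_memory_inline_jit_restrict_rwx_to_rx_with_witness().
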
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
index 220ffe7bb2..4c1ff56436 100644
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -6,6 +6,8 @@
#include "jit/Recover.h"
+#include "mozilla/Casting.h"
+
#include "jsmath.h"
#include "builtin/Object.h"
@@ -495,16 +497,15 @@ bool MBigIntAdd::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntAdd::RBigIntAdd(CompactBufferReader& reader) {}
bool RBigIntAdd::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::AddValues(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::add(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
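
[Editor's note: the remaining BigInt recover instructions below all follow
this exact shape, only swapping the BigInt:: operation. Schematically, with
OP a placeholder for add/sub/mul/div/mod/pow/bitAnd/bitOr/bitXor/lsh/rsh:]

    bool RBigIntOP::recover(JSContext* cx, SnapshotIterator& iter) const {
      Rooted<BigInt*> lhs(cx, iter.readBigInt());
      Rooted<BigInt*> rhs(cx, iter.readBigInt());

      BigInt* result = BigInt::OP(cx, lhs, rhs);  // direct op, no Value boxing
      if (!result) {
        return false;  // propagate OOM/exception
      }

      iter.storeInstructionResult(BigIntValue(result));
      return true;
    }
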
@@ -517,16 +518,15 @@ bool MBigIntSub::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntSub::RBigIntSub(CompactBufferReader& reader) {}
bool RBigIntSub::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::SubValues(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::sub(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -539,16 +539,15 @@ bool MBigIntMul::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntMul::RBigIntMul(CompactBufferReader& reader) {}
bool RBigIntMul::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::MulValues(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::mul(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -561,18 +560,17 @@ bool MBigIntDiv::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntDiv::RBigIntDiv(CompactBufferReader& reader) {}
bool RBigIntDiv::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
-
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- MOZ_ASSERT(!rhs.toBigInt()->isZero(),
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
+ MOZ_ASSERT(!rhs->isZero(),
"division by zero throws and therefore can't be recovered");
- if (!js::DivValues(cx, &lhs, &rhs, &result)) {
+
+ BigInt* result = BigInt::div(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -585,18 +583,17 @@ bool MBigIntMod::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntMod::RBigIntMod(CompactBufferReader& reader) {}
bool RBigIntMod::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
-
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- MOZ_ASSERT(!rhs.toBigInt()->isZero(),
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
+ MOZ_ASSERT(!rhs->isZero(),
"division by zero throws and therefore can't be recovered");
- if (!js::ModValues(cx, &lhs, &rhs, &result)) {
+
+ BigInt* result = BigInt::mod(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -609,18 +606,17 @@ bool MBigIntPow::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntPow::RBigIntPow(CompactBufferReader& reader) {}
bool RBigIntPow::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
-
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- MOZ_ASSERT(!rhs.toBigInt()->isNegative(),
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
+ MOZ_ASSERT(!rhs->isNegative(),
"negative exponent throws and therefore can't be recovered");
- if (!js::PowValues(cx, &lhs, &rhs, &result)) {
+
+ BigInt* result = BigInt::pow(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -633,16 +629,15 @@ bool MBigIntBitAnd::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitAnd::RBigIntBitAnd(CompactBufferReader& reader) {}
bool RBigIntBitAnd::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitAnd(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::bitAnd(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -655,16 +650,15 @@ bool MBigIntBitOr::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitOr::RBigIntBitOr(CompactBufferReader& reader) {}
bool RBigIntBitOr::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitOr(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::bitOr(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -677,16 +671,15 @@ bool MBigIntBitXor::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitXor::RBigIntBitXor(CompactBufferReader& reader) {}
bool RBigIntBitXor::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitXor(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::bitXor(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -699,16 +692,15 @@ bool MBigIntLsh::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntLsh::RBigIntLsh(CompactBufferReader& reader) {}
bool RBigIntLsh::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitLsh(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::lsh(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -721,16 +713,15 @@ bool MBigIntRsh::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntRsh::RBigIntRsh(CompactBufferReader& reader) {}
bool RBigIntRsh::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitRsh(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::rsh(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -743,15 +734,14 @@ bool MBigIntIncrement::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntIncrement::RBigIntIncrement(CompactBufferReader& reader) {}
bool RBigIntIncrement::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::IncOperation(cx, operand, &result)) {
+ BigInt* result = BigInt::inc(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -764,15 +754,14 @@ bool MBigIntDecrement::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntDecrement::RBigIntDecrement(CompactBufferReader& reader) {}
bool RBigIntDecrement::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::DecOperation(cx, operand, &result)) {
+ BigInt* result = BigInt::dec(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -785,15 +774,14 @@ bool MBigIntNegate::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntNegate::RBigIntNegate(CompactBufferReader& reader) {}
bool RBigIntNegate::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::NegOperation(cx, &operand, &result)) {
+ BigInt* result = BigInt::neg(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -806,15 +794,14 @@ bool MBigIntBitNot::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitNot::RBigIntBitNot(CompactBufferReader& reader) {}
bool RBigIntBitNot::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::BitNot(cx, &operand, &result)) {
+ BigInt* result = BigInt::bitNot(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -910,7 +897,7 @@ bool RConcat::recover(JSContext* cx, SnapshotIterator& iter) const {
RStringLength::RStringLength(CompactBufferReader& reader) {}
bool RStringLength::recover(JSContext* cx, SnapshotIterator& iter) const {
- JSString* string = iter.read().toString();
+ JSString* string = iter.readString();
static_assert(JSString::MAX_LENGTH <= INT32_MAX,
"Can cast string length to int32_t");
@@ -953,7 +940,7 @@ bool MFloor::writeRecoverData(CompactBufferWriter& writer) const {
RFloor::RFloor(CompactBufferReader& reader) {}
bool RFloor::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_floor_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -969,7 +956,7 @@ bool MCeil::writeRecoverData(CompactBufferWriter& writer) const {
RCeil::RCeil(CompactBufferReader& reader) {}
bool RCeil::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_ceil_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -985,7 +972,7 @@ bool MRound::writeRecoverData(CompactBufferWriter& writer) const {
RRound::RRound(CompactBufferReader& reader) {}
bool RRound::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_round_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1001,7 +988,7 @@ bool MTrunc::writeRecoverData(CompactBufferWriter& writer) const {
RTrunc::RTrunc(CompactBufferReader& reader) {}
bool RTrunc::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_trunc_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1017,21 +1004,18 @@ bool MCharCodeAt::writeRecoverData(CompactBufferWriter& writer) const {
RCharCodeAt::RCharCodeAt(CompactBufferReader& reader) {}
bool RCharCodeAt::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString string(cx, iter.read().toString());
- int32_t index = iter.read().toInt32();
+ JSString* string = iter.readString();
- RootedValue result(cx);
- if (0 <= index && size_t(index) < string->length()) {
- char16_t c;
- if (!string->getChar(cx, index, &c)) {
- return false;
- }
- result.setInt32(c);
- } else {
- result.setNaN();
+ // Int32 because |index| is computed from MBoundsCheck.
+ int32_t index = iter.readInt32();
+ MOZ_RELEASE_ASSERT(0 <= index && size_t(index) < string->length());
+
+ char16_t c;
+ if (!string->getChar(cx, index, &c)) {
+ return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(Int32Value(c));
return true;
}
@@ -1044,7 +1028,8 @@ bool MFromCharCode::writeRecoverData(CompactBufferWriter& writer) const {
RFromCharCode::RFromCharCode(CompactBufferReader& reader) {}
bool RFromCharCode::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t charCode = iter.read().toInt32();
+ // Number because |charCode| is computed from (recoverable) user input.
+ int32_t charCode = JS::ToInt32(iter.readNumber());
JSString* str = StringFromCharCode(cx, charCode);
if (!str) {
@@ -1068,7 +1053,8 @@ RFromCharCodeEmptyIfNegative::RFromCharCodeEmptyIfNegative(
bool RFromCharCodeEmptyIfNegative::recover(JSContext* cx,
SnapshotIterator& iter) const {
- int32_t charCode = iter.read().toInt32();
+ // Int32 because |charCode| is computed from MCharCodeAtOrNegative.
+ int32_t charCode = iter.readInt32();
JSString* str;
if (charCode < 0) {
@@ -1093,8 +1079,8 @@ bool MPow::writeRecoverData(CompactBufferWriter& writer) const {
RPow::RPow(CompactBufferReader& reader) {}
bool RPow::recover(JSContext* cx, SnapshotIterator& iter) const {
- double base = iter.read().toNumber();
- double power = iter.read().toNumber();
+ double base = iter.readNumber();
+ double power = iter.readNumber();
double result = ecmaPow(base, power);
iter.storeInstructionResult(NumberValue(result));
@@ -1110,7 +1096,7 @@ bool MPowHalf::writeRecoverData(CompactBufferWriter& writer) const {
RPowHalf::RPowHalf(CompactBufferReader& reader) {}
bool RPowHalf::recover(JSContext* cx, SnapshotIterator& iter) const {
- double base = iter.read().toNumber();
+ double base = iter.readNumber();
double power = 0.5;
double result = ecmaPow(base, power);
@@ -1128,8 +1114,8 @@ bool MMinMax::writeRecoverData(CompactBufferWriter& writer) const {
RMinMax::RMinMax(CompactBufferReader& reader) { isMax_ = reader.readByte(); }
bool RMinMax::recover(JSContext* cx, SnapshotIterator& iter) const {
- double x = iter.read().toNumber();
- double y = iter.read().toNumber();
+ double x = iter.readNumber();
+ double y = iter.readNumber();
double result;
if (isMax_) {
@@ -1151,7 +1137,7 @@ bool MAbs::writeRecoverData(CompactBufferWriter& writer) const {
RAbs::RAbs(CompactBufferReader& reader) {}
bool RAbs::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_abs_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1170,7 +1156,7 @@ RSqrt::RSqrt(CompactBufferReader& reader) {
}
bool RSqrt::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_sqrt_impl(num);
// MIRType::Float32 is a specialization embedding the fact that the result is
@@ -1192,8 +1178,8 @@ bool MAtan2::writeRecoverData(CompactBufferWriter& writer) const {
RAtan2::RAtan2(CompactBufferReader& reader) {}
bool RAtan2::recover(JSContext* cx, SnapshotIterator& iter) const {
- double y = iter.read().toNumber();
- double x = iter.read().toNumber();
+ double y = iter.readNumber();
+ double x = iter.readNumber();
double result = js::ecmaAtan2(y, x);
iter.storeInstructionResult(DoubleValue(result));
@@ -1218,7 +1204,7 @@ bool RHypot::recover(JSContext* cx, SnapshotIterator& iter) const {
}
for (uint32_t i = 0; i < numOperands_; ++i) {
- vec.infallibleAppend(iter.read());
+ vec.infallibleAppend(NumberValue(iter.readNumber()));
}
RootedValue result(cx);
@@ -1265,7 +1251,7 @@ bool MSign::writeRecoverData(CompactBufferWriter& writer) const {
RSign::RSign(CompactBufferReader& reader) {}
bool RSign::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_sign_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1322,7 +1308,7 @@ RMathFunction::RMathFunction(CompactBufferReader& reader) {
}
bool RMathFunction::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result;
switch (function_) {
@@ -1431,8 +1417,8 @@ bool MStringSplit::writeRecoverData(CompactBufferWriter& writer) const {
RStringSplit::RStringSplit(CompactBufferReader& reader) {}
bool RStringSplit::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString str(cx, iter.read().toString());
- RootedString sep(cx, iter.read().toString());
+ RootedString str(cx, iter.readString());
+ RootedString sep(cx, iter.readString());
JSObject* res = StringSplitString(cx, str, sep, INT32_MAX);
if (!res) {
@@ -1452,7 +1438,7 @@ bool MNaNToZero::writeRecoverData(CompactBufferWriter& writer) const {
RNaNToZero::RNaNToZero(CompactBufferReader& reader) {}
bool RNaNToZero::recover(JSContext* cx, SnapshotIterator& iter) const {
- double v = iter.read().toNumber();
+ double v = iter.readNumber();
if (std::isnan(v) || mozilla::IsNegativeZero(v)) {
v = 0.0;
}
@@ -1470,9 +1456,11 @@ bool MRegExpMatcher::writeRecoverData(CompactBufferWriter& writer) const {
RRegExpMatcher::RRegExpMatcher(CompactBufferReader& reader) {}
bool RRegExpMatcher::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject regexp(cx, &iter.read().toObject());
- RootedString input(cx, iter.read().toString());
- int32_t lastIndex = iter.read().toInt32();
+ RootedObject regexp(cx, iter.readObject());
+ RootedString input(cx, iter.readString());
+
+  // Int32 because |lastIndex| is computed from a transpiled self-hosted call.
+ int32_t lastIndex = iter.readInt32();
RootedValue result(cx);
if (!RegExpMatcherRaw(cx, regexp, input, lastIndex, nullptr, &result)) {
@@ -1507,7 +1495,8 @@ bool MTypeOfName::writeRecoverData(CompactBufferWriter& writer) const {
RTypeOfName::RTypeOfName(CompactBufferReader& reader) {}
bool RTypeOfName::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t type = iter.read().toInt32();
+ // Int32 because |type| is computed from MTypeOf.
+ int32_t type = iter.readInt32();
MOZ_ASSERT(JSTYPE_UNDEFINED <= type && type < JSTYPE_LIMIT);
JSString* name = TypeName(JSType(type), *cx->runtime()->commonNames);
@@ -1548,7 +1537,7 @@ bool MToFloat32::writeRecoverData(CompactBufferWriter& writer) const {
RToFloat32::RToFloat32(CompactBufferReader& reader) {}
bool RToFloat32::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::RoundFloat32(num);
iter.storeInstructionResult(DoubleValue(result));
@@ -1588,7 +1577,7 @@ bool MNewObject::writeRecoverData(CompactBufferWriter& writer) const {
RNewObject::RNewObject(CompactBufferReader& reader) {}
bool RNewObject::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
// See CodeGenerator::visitNewObjectVMCall.
// Note that recover instructions are only used if mode == ObjectCreate.
@@ -1622,8 +1611,7 @@ RNewPlainObject::RNewPlainObject(CompactBufferReader& reader) {
}
bool RNewPlainObject::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<SharedShape*> shape(cx,
- &iter.read().toGCCellPtr().as<Shape>().asShared());
+ Rooted<SharedShape*> shape(cx, &iter.readGCCellPtr().as<Shape>().asShared());
// See CodeGenerator::visitNewPlainObject.
JSObject* resultObject =
@@ -1676,7 +1664,7 @@ bool MNewTypedArray::writeRecoverData(CompactBufferWriter& writer) const {
RNewTypedArray::RNewTypedArray(CompactBufferReader& reader) {}
bool RNewTypedArray::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
size_t length = templateObject.as<FixedLengthTypedArrayObject>()->length();
MOZ_ASSERT(length <= INT32_MAX,
@@ -1704,7 +1692,7 @@ RNewArray::RNewArray(CompactBufferReader& reader) {
}
bool RNewArray::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
Rooted<Shape*> shape(cx, templateObject->shape());
ArrayObject* resultObject = NewArrayWithShape(cx, count_, shape);
@@ -1728,7 +1716,7 @@ RNewIterator::RNewIterator(CompactBufferReader& reader) {
}
bool RNewIterator::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
JSObject* resultObject = nullptr;
switch (MNewIterator::Type(type_)) {
@@ -1760,8 +1748,8 @@ bool MLambda::writeRecoverData(CompactBufferWriter& writer) const {
RLambda::RLambda(CompactBufferReader& reader) {}
bool RLambda::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject scopeChain(cx, &iter.read().toObject());
- RootedFunction fun(cx, &iter.read().toObject().as<JSFunction>());
+ RootedObject scopeChain(cx, iter.readObject());
+ RootedFunction fun(cx, &iter.readObject()->as<JSFunction>());
JSObject* resultObject = js::Lambda(cx, fun, scopeChain);
if (!resultObject) {
@@ -1781,9 +1769,9 @@ bool MFunctionWithProto::writeRecoverData(CompactBufferWriter& writer) const {
RFunctionWithProto::RFunctionWithProto(CompactBufferReader& reader) {}
bool RFunctionWithProto::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject scopeChain(cx, &iter.read().toObject());
- RootedObject prototype(cx, &iter.read().toObject());
- RootedFunction fun(cx, &iter.read().toObject().as<JSFunction>());
+ RootedObject scopeChain(cx, iter.readObject());
+ RootedObject prototype(cx, iter.readObject());
+ RootedFunction fun(cx, &iter.readObject()->as<JSFunction>());
JSObject* resultObject =
js::FunWithProtoOperation(cx, fun, scopeChain, prototype);
@@ -1804,7 +1792,7 @@ bool MNewCallObject::writeRecoverData(CompactBufferWriter& writer) const {
RNewCallObject::RNewCallObject(CompactBufferReader& reader) {}
bool RNewCallObject::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<CallObject*> templateObj(cx, &iter.read().toObject().as<CallObject>());
+ Rooted<CallObject*> templateObj(cx, &iter.readObject()->as<CallObject>());
Rooted<SharedShape*> shape(cx, templateObj->sharedShape());
@@ -1832,7 +1820,7 @@ bool MObjectKeys::writeRecoverData(CompactBufferWriter& writer) const {
RObjectKeys::RObjectKeys(CompactBufferReader& reader) {}
bool RObjectKeys::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<JSObject*> obj(cx, &iter.read().toObject());
+ Rooted<JSObject*> obj(cx, iter.readObject());
JSObject* resultKeys = ObjectKeys(cx, obj);
if (!resultKeys) {
@@ -1855,7 +1843,7 @@ RObjectState::RObjectState(CompactBufferReader& reader) {
}
bool RObjectState::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject object(cx, &iter.read().toObject());
+ RootedObject object(cx, iter.readObject());
Handle<NativeObject*> nativeObject = object.as<NativeObject>();
MOZ_ASSERT(!Watchtower::watchesPropertyModification(nativeObject));
MOZ_ASSERT(nativeObject->slotSpan() == numSlots());
@@ -1881,8 +1869,10 @@ RArrayState::RArrayState(CompactBufferReader& reader) {
}
bool RArrayState::recover(JSContext* cx, SnapshotIterator& iter) const {
- ArrayObject* object = &iter.read().toObject().as<ArrayObject>();
- uint32_t initLength = iter.read().toInt32();
+ ArrayObject* object = &iter.readObject()->as<ArrayObject>();
+
+ // Int32 because |initLength| is computed from MConstant.
+ uint32_t initLength = iter.readInt32();
MOZ_ASSERT(object->getDenseInitializedLength() == 0,
"initDenseElement call below relies on this");
@@ -1903,37 +1893,6 @@ bool RArrayState::recover(JSContext* cx, SnapshotIterator& iter) const {
return true;
}
-bool MSetArrayLength::writeRecoverData(CompactBufferWriter& writer) const {
- MOZ_ASSERT(canRecoverOnBailout());
- // For simplicity, we capture directly the object instead of the elements
- // pointer.
- MOZ_ASSERT(elements()->type() != MIRType::Elements);
- writer.writeUnsigned(uint32_t(RInstruction::Recover_SetArrayLength));
- return true;
-}
-
-bool MSetArrayLength::canRecoverOnBailout() const {
- return isRecoveredOnBailout();
-}
-
-RSetArrayLength::RSetArrayLength(CompactBufferReader& reader) {}
-
-bool RSetArrayLength::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<ArrayObject*> obj(cx, &iter.read().toObject().as<ArrayObject>());
- RootedValue len(cx, iter.read());
-
- RootedId id(cx, NameToId(cx->names().length));
- Rooted<PropertyDescriptor> desc(
- cx, PropertyDescriptor::Data(len, JS::PropertyAttribute::Writable));
- ObjectOpResult error;
- if (!ArraySetLength(cx, obj, id, desc, error)) {
- return false;
- }
-
- iter.storeInstructionResult(ObjectValue(*obj));
- return true;
-}
-
bool MAssertRecoveredOnBailout::writeRecoverData(
CompactBufferWriter& writer) const {
MOZ_ASSERT(canRecoverOnBailout());
@@ -1966,9 +1925,9 @@ RStringReplace::RStringReplace(CompactBufferReader& reader) {
}
bool RStringReplace::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString string(cx, iter.read().toString());
- RootedString pattern(cx, iter.read().toString());
- RootedString replace(cx, iter.read().toString());
+ RootedString string(cx, iter.readString());
+ RootedString pattern(cx, iter.readString());
+ RootedString replace(cx, iter.readString());
JSString* result =
isFlatReplacement_
@@ -1992,9 +1951,20 @@ bool MSubstr::writeRecoverData(CompactBufferWriter& writer) const {
RSubstr::RSubstr(CompactBufferReader& reader) {}
bool RSubstr::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString str(cx, iter.read().toString());
- int32_t begin = iter.read().toInt32();
- int32_t length = iter.read().toInt32();
+ RootedString str(cx, iter.readString());
+
+ // Int32 because |begin| is computed from MStringTrimStartIndex, MConstant,
+ // or CallSubstringKernelResult.
+ int32_t begin = iter.readInt32();
+
+ // |length| is computed from MSub(truncated), MStringTrimEndIndex, or
+ // CallSubstringKernelResult. The current MSub inputs won't overflow, so when
+ // RSub recovers the MSub instruction, the input will be representable as an
+ // Int32. This is only true as long as RSub calls |js::SubOperation|, which in
+ // turn calls |JS::Value::setNumber|. We don't want to rely on this exact call
+ // sequence, so instead use |readNumber| here and then release-assert the
+ // number is exactly representable as an Int32.
+ int32_t length = mozilla::ReleaseAssertedCast<int32_t>(iter.readNumber());
JSString* result = SubstringKernel(cx, str, begin, length);
if (!result) {
@@ -2014,10 +1984,11 @@ bool MAtomicIsLockFree::writeRecoverData(CompactBufferWriter& writer) const {
RAtomicIsLockFree::RAtomicIsLockFree(CompactBufferReader& reader) {}
bool RAtomicIsLockFree::recover(JSContext* cx, SnapshotIterator& iter) const {
- Value operand = iter.read();
- MOZ_ASSERT(operand.isInt32());
+ double dsize = JS::ToInteger(iter.readNumber());
- bool result = AtomicOperations::isLockfreeJS(operand.toInt32());
+ int32_t size;
+ bool result = mozilla::NumberEqualsInt32(dsize, &size) &&
+ AtomicOperations::isLockfreeJS(size);
iter.storeInstructionResult(BooleanValue(result));
return true;
}
@@ -2031,10 +2002,12 @@ bool MBigIntAsIntN::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntAsIntN::RBigIntAsIntN(CompactBufferReader& reader) {}
bool RBigIntAsIntN::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t bits = iter.read().toInt32();
- RootedBigInt input(cx, iter.read().toBigInt());
-
+ // Int32 because |bits| is computed from MGuardInt32IsNonNegative.
+ int32_t bits = iter.readInt32();
MOZ_ASSERT(bits >= 0);
+
+ RootedBigInt input(cx, iter.readBigInt());
+
BigInt* result = BigInt::asIntN(cx, input, bits);
if (!result) {
return false;
@@ -2053,10 +2026,12 @@ bool MBigIntAsUintN::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntAsUintN::RBigIntAsUintN(CompactBufferReader& reader) {}
bool RBigIntAsUintN::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t bits = iter.read().toInt32();
- RootedBigInt input(cx, iter.read().toBigInt());
-
+ // Int32 because |bits| is computed from MGuardInt32IsNonNegative.
+ int32_t bits = iter.readInt32();
MOZ_ASSERT(bits >= 0);
+
+ RootedBigInt input(cx, iter.readBigInt());
+
BigInt* result = BigInt::asUintN(cx, input, bits);
if (!result) {
return false;
@@ -2077,7 +2052,7 @@ RCreateArgumentsObject::RCreateArgumentsObject(CompactBufferReader& reader) {}
bool RCreateArgumentsObject::recover(JSContext* cx,
SnapshotIterator& iter) const {
- RootedObject callObject(cx, &iter.read().toObject());
+ RootedObject callObject(cx, iter.readObject());
RootedObject result(
cx, ArgumentsObject::createForIon(cx, iter.frame(), callObject));
if (!result) {
@@ -2104,8 +2079,8 @@ RCreateInlinedArgumentsObject::RCreateInlinedArgumentsObject(
bool RCreateInlinedArgumentsObject::recover(JSContext* cx,
SnapshotIterator& iter) const {
- RootedObject callObject(cx, &iter.read().toObject());
- RootedFunction callee(cx, &iter.read().toObject().as<JSFunction>());
+ RootedObject callObject(cx, iter.readObject());
+ RootedFunction callee(cx, &iter.readObject()->as<JSFunction>());
JS::RootedValueArray<ArgumentsObject::MaxInlinedArgs> argsArray(cx);
for (uint32_t i = 0; i < numActuals_; i++) {
@@ -2136,7 +2111,8 @@ RRest::RRest(CompactBufferReader& reader) {
bool RRest::recover(JSContext* cx, SnapshotIterator& iter) const {
JitFrameLayout* frame = iter.frame();
- uint32_t numActuals = iter.read().toInt32();
+ // Int32 because |numActuals| is computed from MArgumentsLength.
+ uint32_t numActuals = iter.readInt32();
MOZ_ASSERT(numActuals == frame->numActualArgs());
uint32_t numFormals = numFormals_;
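
[Editor's note: one reader deserves a note. RSubstr's |length| above comes
through readNumber plus mozilla::ReleaseAssertedCast rather than readInt32,
because the value is produced as a Number that merely happens to be
int32-representable. A sketch of what the checked cast does, under the
assumption that it behaves like a release-mode checked narrowing:]

    #include <cstdint>
    #include "mozilla/Assertions.h"

    // Simplified model of mozilla::ReleaseAssertedCast<int32_t>(double):
    // crash (even in release builds) unless the value round-trips exactly.
    int32_t ReleaseAssertedInt32(double d) {
      // Range check before the narrowing cast, to avoid UB on out-of-range
      // input.
      MOZ_RELEASE_ASSERT(d >= INT32_MIN && d <= INT32_MAX);
      int32_t i = static_cast<int32_t>(d);
      MOZ_RELEASE_ASSERT(static_cast<double>(i) == d);  // exact, no fraction
      return i;
    }
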
diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h
index 7cc46c636d..878204de83 100644
--- a/js/src/jit/Recover.h
+++ b/js/src/jit/Recover.h
@@ -129,7 +129,6 @@ namespace jit {
_(ObjectKeys) \
_(ObjectState) \
_(ArrayState) \
- _(SetArrayLength) \
_(AtomicIsLockFree) \
_(BigIntAsIntN) \
_(BigIntAsUintN) \
@@ -882,14 +881,6 @@ class RArrayState final : public RInstruction {
SnapshotIterator& iter) const override;
};
-class RSetArrayLength final : public RInstruction {
- public:
- RINSTRUCTION_HEADER_NUM_OP_(SetArrayLength, 2)
-
- [[nodiscard]] bool recover(JSContext* cx,
- SnapshotIterator& iter) const override;
-};
-
class RAtomicIsLockFree final : public RInstruction {
public:
RINSTRUCTION_HEADER_NUM_OP_(AtomicIsLockFree, 1)
diff --git a/js/src/jit/Trampoline.cpp b/js/src/jit/Trampoline.cpp
index 85661784a7..e6d0cd31c9 100644
--- a/js/src/jit/Trampoline.cpp
+++ b/js/src/jit/Trampoline.cpp
@@ -96,18 +96,13 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
// |
// ^--- IonICCall <---- Ion
// |
- // ^--- Arguments Rectifier
- // |    ^
- // |    |
- // |    ^--- Ion
- // |    |
- // |    ^--- Baseline Stub <---- Baseline
- // |    |
- // |    ^--- Entry Frame (CppToJSJit or WasmToJSJit)
+ // ^--- Entry Frame (BaselineInterpreter) (unwrapped)
// |
- // ^--- Entry Frame (CppToJSJit or WasmToJSJit)
+ // ^--- Arguments Rectifier (unwrapped)
+ // |
+ // ^--- Trampoline Native (unwrapped)
// |
- // ^--- Entry Frame (BaselineInterpreter)
+ // ^--- Entry Frame (CppToJSJit or WasmToJSJit)
//
// NOTE: Keep this in sync with JSJitProfilingFrameIterator::moveToNextFrame!
@@ -153,6 +148,7 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
Label handle_BaselineOrIonJS;
Label handle_BaselineStub;
Label handle_Rectifier;
+ Label handle_TrampolineNative;
Label handle_BaselineInterpreterEntry;
Label handle_IonICCall;
Label handle_Entry;
@@ -176,6 +172,8 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
&handle_BaselineOrIonJS);
masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::IonICCall),
&handle_IonICCall);
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::TrampolineNative),
+ &handle_TrampolineNative);
masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::WasmToJSJit),
&handle_Entry);
@@ -237,9 +235,21 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
// There can be multiple previous frame types, so just "unwrap" the arguments
// rectifier frame and try again.
masm.loadPtr(Address(fpScratch, CallerFPOffset), fpScratch);
- emitAssertPrevFrameType(fpScratch, scratch,
- {FrameType::IonJS, FrameType::BaselineStub,
- FrameType::CppToJSJit, FrameType::WasmToJSJit});
+ emitAssertPrevFrameType(
+ fpScratch, scratch,
+ {FrameType::IonJS, FrameType::BaselineStub, FrameType::TrampolineNative,
+ FrameType::CppToJSJit, FrameType::WasmToJSJit});
+ masm.jump(&again);
+ }
+
+ masm.bind(&handle_TrampolineNative);
+ {
+ // Unwrap this frame, similar to arguments rectifier frames.
+ masm.loadPtr(Address(fpScratch, CallerFPOffset), fpScratch);
+ emitAssertPrevFrameType(
+ fpScratch, scratch,
+ {FrameType::IonJS, FrameType::BaselineStub, FrameType::Rectifier,
+ FrameType::CppToJSJit, FrameType::WasmToJSJit});
masm.jump(&again);
}
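
A standalone model of the unwrap behavior the stub now implements for both Rectifier and TrampolineNative frames (names are illustrative, not the real JSJitProfilingFrameIterator API):

#include <cstdint>

// Rectifier and TrampolineNative frames carry no JS location of their own,
// so the profiler follows the saved frame pointer until it reaches a frame
// it can attribute.
enum class FrameType : uint8_t {
  IonJS, BaselineJS, BaselineStub, Rectifier,
  TrampolineNative, CppToJSJit, WasmToJSJit
};

struct Frame {
  Frame* callerFP;  // what the masm code loads via CallerFPOffset
  FrameType type;
};

static FrameType skipUnwrappedFrames(Frame*& fp) {
  while (fp->type == FrameType::Rectifier ||
         fp->type == FrameType::TrampolineNative) {
    fp = fp->callerFP;
  }
  return fp->type;
}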
diff --git a/js/src/jit/TrampolineNatives.cpp b/js/src/jit/TrampolineNatives.cpp
new file mode 100644
index 0000000000..0bde6d9985
--- /dev/null
+++ b/js/src/jit/TrampolineNatives.cpp
@@ -0,0 +1,274 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/TrampolineNatives.h"
+
+#include "jit/CalleeToken.h"
+#include "jit/Ion.h"
+#include "jit/JitCommon.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/PerfSpewer.h"
+#include "js/CallArgs.h"
+#include "js/experimental/JitInfo.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/Activation-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+#define ADD_NATIVE(native) \
+ const JSJitInfo js::jit::JitInfo_##native{ \
+ {nullptr}, \
+ {uint16_t(TrampolineNative::native)}, \
+ {0}, \
+ JSJitInfo::TrampolineNative};
+TRAMPOLINE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+
+void js::jit::SetTrampolineNativeJitEntry(JSContext* cx, JSFunction* fun,
+ TrampolineNative native) {
+ if (!cx->runtime()->jitRuntime()) {
+ // No JIT support, so there's no trampoline.
+ return;
+ }
+ void** entry = cx->runtime()->jitRuntime()->trampolineNativeJitEntry(native);
+ MOZ_ASSERT(entry);
+ MOZ_ASSERT(*entry);
+ fun->setTrampolineNativeJitEntry(entry);
+}
+
+uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateArraySortTrampoline");
+
+ const uint32_t offset = startTrampolineCode(masm);
+
+ // The stack for the trampoline frame will look like this:
+ //
+ // [TrampolineNativeFrameLayout]
+ // * this and arguments passed by the caller
+ // * CalleeToken
+ // * Descriptor
+ // * Return Address
+ // * Saved frame pointer <= FramePointer
+ // [ArraySortData]
+ // * ...
+ // * Comparator this + argument Values --+ -> comparator JitFrameLayout
+ // * Comparator (CalleeToken)            |
+ // * Descriptor                      ----+ <= StackPointer
+ //
+ // The call to the comparator pushes the return address and the frame pointer,
+ // so we check the alignment after pushing these two pointers.
+ constexpr size_t FrameSize = sizeof(ArraySortData);
+ constexpr size_t PushedByCall = 2 * sizeof(void*);
+ static_assert((FrameSize + PushedByCall) % JitStackAlignment == 0);
+
+ // Assert ArraySortData comparator data matches JitFrameLayout.
+ static_assert(PushedByCall + ArraySortData::offsetOfDescriptor() ==
+ JitFrameLayout::offsetOfDescriptor());
+ static_assert(PushedByCall + ArraySortData::offsetOfComparator() ==
+ JitFrameLayout::offsetOfCalleeToken());
+ static_assert(PushedByCall + ArraySortData::offsetOfComparatorThis() ==
+ JitFrameLayout::offsetOfThis());
+ static_assert(PushedByCall + ArraySortData::offsetOfComparatorArgs() ==
+ JitFrameLayout::offsetOfActualArgs());
+ static_assert(CalleeToken_Function == 0,
+ "JSFunction* is valid CalleeToken for non-constructor calls");
+
+ // Compute offsets from FramePointer.
+ constexpr int32_t ComparatorOffset =
+ -int32_t(FrameSize) + ArraySortData::offsetOfComparator();
+ constexpr int32_t RvalOffset =
+ -int32_t(FrameSize) + ArraySortData::offsetOfComparatorReturnValue();
+ constexpr int32_t DescriptorOffset =
+ -int32_t(FrameSize) + ArraySortData::offsetOfDescriptor();
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.takeUnchecked(ReturnReg);
+ regs.takeUnchecked(JSReturnOperand);
+ Register temp0 = regs.takeAny();
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+
+ // Reserve space and check alignment of the comparator frame.
+ masm.reserveStack(FrameSize);
+ masm.assertStackAlignment(JitStackAlignment, PushedByCall);
+
+ // Trampoline control flow looks like this:
+ //
+ // call ArraySortFromJit
+ // goto checkReturnValue
+ // call_comparator:
+ // call comparator
+ // call ArraySortData::sortWithComparator
+ // checkReturnValue:
+ // check return value, jump to call_comparator if needed
+ // return rval
+
+ auto pushExitFrame = [&](Register cxReg, Register scratchReg) {
+ MOZ_ASSERT(masm.framePushed() == FrameSize);
+ masm.PushFrameDescriptor(FrameType::TrampolineNative);
+ masm.Push(ImmWord(0)); // Fake return address.
+ masm.Push(FramePointer);
+ masm.enterFakeExitFrame(cxReg, scratchReg, ExitFrameType::Bare);
+ };
+
+ // Call ArraySortFromJit.
+ using Fn1 = ArraySortResult (*)(JSContext* cx,
+ jit::TrampolineNativeFrameLayout* frame);
+ masm.loadJSContext(temp0);
+ pushExitFrame(temp0, temp1);
+ masm.setupAlignedABICall();
+ masm.passABIArg(temp0);
+ masm.passABIArg(FramePointer);
+ masm.callWithABI<Fn1, ArraySortFromJit>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Check return value.
+ Label checkReturnValue;
+ masm.jump(&checkReturnValue);
+ masm.setFramePushed(FrameSize);
+
+ // Call the comparator. Store the frame descriptor before each call to ensure
+ // the HASCACHEDSAVEDFRAME_BIT flag from a previous call is cleared.
+ uintptr_t jitCallDescriptor = MakeFrameDescriptorForJitCall(
+ jit::FrameType::TrampolineNative, ArraySortData::ComparatorActualArgs);
+ Label callDone, jitCallFast, jitCallSlow;
+ masm.bind(&jitCallFast);
+ {
+ masm.storePtr(ImmWord(jitCallDescriptor),
+ Address(FramePointer, DescriptorOffset));
+ masm.loadPtr(Address(FramePointer, ComparatorOffset), temp0);
+ masm.loadJitCodeRaw(temp0, temp1);
+ masm.callJit(temp1);
+ masm.jump(&callDone);
+ }
+ masm.bind(&jitCallSlow);
+ {
+ masm.storePtr(ImmWord(jitCallDescriptor),
+ Address(FramePointer, DescriptorOffset));
+ masm.loadPtr(Address(FramePointer, ComparatorOffset), temp0);
+ masm.loadJitCodeRaw(temp0, temp1);
+ masm.switchToObjectRealm(temp0, temp2);
+
+ // Handle arguments underflow.
+ Label noUnderflow, restoreRealm;
+ masm.loadFunctionArgCount(temp0, temp0);
+ masm.branch32(Assembler::BelowOrEqual, temp0,
+ Imm32(ArraySortData::ComparatorActualArgs), &noUnderflow);
+ {
+ Label rectifier;
+ bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
+ masm.call(&rectifier);
+ masm.jump(&restoreRealm);
+ }
+ masm.bind(&noUnderflow);
+ masm.callJit(temp1);
+
+ masm.bind(&restoreRealm);
+ Address calleeToken(FramePointer,
+ TrampolineNativeFrameLayout::offsetOfCalleeToken());
+ masm.loadFunctionFromCalleeToken(calleeToken, temp0);
+ masm.switchToObjectRealm(temp0, temp1);
+ }
+
+ // Store the comparator's return value.
+ masm.bind(&callDone);
+ masm.storeValue(JSReturnOperand, Address(FramePointer, RvalOffset));
+
+ // Call ArraySortData::sortWithComparator.
+ using Fn2 = ArraySortResult (*)(ArraySortData* data);
+ masm.moveStackPtrTo(temp2);
+ masm.loadJSContext(temp0);
+ pushExitFrame(temp0, temp1);
+ masm.setupAlignedABICall();
+ masm.passABIArg(temp2);
+ masm.callWithABI<Fn2, ArraySortData::sortWithComparator>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Check return value.
+ masm.bind(&checkReturnValue);
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::Failure)), masm.failureLabel());
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::CallJSSameRealmNoRectifier)),
+ &jitCallFast);
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::CallJS)), &jitCallSlow);
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::Done)), &ok);
+ masm.assumeUnreachable("Unexpected return value");
+ masm.bind(&ok);
+#endif
+
+ masm.loadValue(Address(FramePointer, RvalOffset), JSReturnOperand);
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ return offset;
+}
+
+void JitRuntime::generateTrampolineNatives(
+ MacroAssembler& masm, TrampolineNativeJitEntryOffsets& offsets,
+ PerfSpewerRangeRecorder& rangeRecorder) {
+ offsets[TrampolineNative::ArraySort] = generateArraySortTrampoline(masm);
+ rangeRecorder.recordOffset("Trampoline: ArraySort");
+}
+
+bool jit::CallTrampolineNativeJitCode(JSContext* cx, TrampolineNative native,
+ CallArgs& args) {
+ // Use the EnterJit trampoline to enter the native's trampoline code.
+
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+
+ MOZ_ASSERT(!args.isConstructing());
+ CalleeToken calleeToken = CalleeToToken(&args.callee().as<JSFunction>(),
+ /* constructing = */ false);
+
+ Value* maxArgv = args.array() - 1; // -1 to include |this|
+ size_t maxArgc = args.length() + 1;
+
+ Rooted<Value> result(cx, Int32Value(args.length()));
+
+ AssertRealmUnchanged aru(cx);
+ ActivationEntryMonitor entryMonitor(cx, calleeToken);
+ JitActivation activation(cx);
+
+ EnterJitCode enter = cx->runtime()->jitRuntime()->enterJit();
+ void* code = *cx->runtime()->jitRuntime()->trampolineNativeJitEntry(native);
+
+ CALL_GENERATED_CODE(enter, code, maxArgc, maxArgv, /* osrFrame = */ nullptr,
+ calleeToken, /* envChain = */ nullptr,
+ /* osrNumStackValues = */ 0, result.address());
+
+ // Ensure the unsafe-region counter was reset after exiting from JIT code.
+ MOZ_ASSERT(!cx->isInUnsafeRegion());
+
+ // Release temporary buffer used for OSR into Ion.
+ cx->runtime()->jitRuntime()->freeIonOsrTempData();
+
+ if (result.isMagic()) {
+ MOZ_ASSERT(result.isMagic(JS_ION_ERROR));
+ return false;
+ }
+
+ args.rval().set(result);
+ return true;
+}
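
A self-contained model of the trampoline's re-entry loop described in the control-flow comment above; the sort functions here are stand-ins, not the real ArraySortFromJit or sortWithComparator:

#include <cstdio>

// ArraySortResult mirrors the four outcomes the trampoline dispatches on.
enum class ArraySortResult { Failure, Done, CallJS, CallJSSameRealmNoRectifier };

static int pendingComparisons = 3;

static ArraySortResult sortStep() {
  // Request a comparator call until no comparisons remain.
  return pendingComparisons-- > 0 ? ArraySortResult::CallJS
                                  : ArraySortResult::Done;
}

static bool runTrampolineLoop() {
  ArraySortResult r = sortStep();  // models the initial ArraySortFromJit call
  while (true) {
    switch (r) {
      case ArraySortResult::Failure:
        return false;  // models branching to the failure label
      case ArraySortResult::Done:
        return true;   // models loading the rval and returning
      case ArraySortResult::CallJS:
      case ArraySortResult::CallJSSameRealmNoRectifier:
        std::puts("call comparator");  // models the JIT call to the comparator
        r = sortStep();  // models ArraySortData::sortWithComparator
        break;
    }
  }
}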
diff --git a/js/src/jit/TrampolineNatives.h b/js/src/jit/TrampolineNatives.h
new file mode 100644
index 0000000000..f71a3b707d
--- /dev/null
+++ b/js/src/jit/TrampolineNatives.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TrampolineNatives_h
+#define jit_TrampolineNatives_h
+
+#include <stdint.h>
+
+#include "js/TypeDecls.h"
+
+// [SMDOC] Trampoline Natives
+//
+// Trampoline natives are JS builtin functions that use the NATIVE_JIT_ENTRY
+// mechanism. This means they have two implementations: the usual native C++
+// implementation and a generated JIT trampoline that JIT callers can call
+// directly using the JIT ABI calling convention. (This is very similar to how
+// calls from JS to WebAssembly are optimized in the JITs.)
+//
+// The JIT trampoline lets us implement some natives in a more efficient way. In
+// particular, it's much faster to call (other) JS functions with JIT code from
+// a JIT trampoline than from C++ code.
+//
+// Trampoline frames use FrameType::TrampolineNative.
+
+class JSJitInfo;
+
+namespace JS {
+class CallArgs;
+} // namespace JS
+
+// List of all trampoline natives.
+#define TRAMPOLINE_NATIVE_LIST(_) _(ArraySort)
+
+namespace js {
+namespace jit {
+
+enum class TrampolineNative : uint16_t {
+#define ADD_NATIVE(native) native,
+ TRAMPOLINE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+ Count
+};
+
+#define ADD_NATIVE(native) extern const JSJitInfo JitInfo_##native;
+TRAMPOLINE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+
+void SetTrampolineNativeJitEntry(JSContext* cx, JSFunction* fun,
+ TrampolineNative native);
+
+bool CallTrampolineNativeJitCode(JSContext* cx, TrampolineNative native,
+ JS::CallArgs& args);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TrampolineNatives_h */
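
As a standalone illustration of the X-macro pattern above: one list drives both the enum and a parallel name table, so a second native would be a one-line list edit. DemoNative and StringFoo are invented for the example:

#include <cstdint>

#define DEMO_NATIVE_LIST(_) _(ArraySort) _(StringFoo)

enum class DemoNative : uint16_t {
#define ADD_NATIVE(native) native,
  DEMO_NATIVE_LIST(ADD_NATIVE)
#undef ADD_NATIVE
  Count
};

static const char* const DemoNativeNames[] = {
#define ADD_NATIVE(native) #native,
    DEMO_NATIVE_LIST(ADD_NATIVE)
#undef ADD_NATIVE
};

static_assert(sizeof(DemoNativeNames) / sizeof(DemoNativeNames[0]) ==
                  size_t(DemoNative::Count),
              "name table stays in sync with the enum");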
diff --git a/js/src/jit/VMFunctionList-inl.h b/js/src/jit/VMFunctionList-inl.h
index d87b010df6..99b98f17ed 100644
--- a/js/src/jit/VMFunctionList-inl.h
+++ b/js/src/jit/VMFunctionList-inl.h
@@ -211,6 +211,7 @@ namespace jit {
_(InterpretResume, js::jit::InterpretResume) \
_(InterruptCheck, js::jit::InterruptCheck) \
_(InvokeFunction, js::jit::InvokeFunction) \
+ _(InvokeNativeFunction, js::jit::InvokeNativeFunction) \
_(IonBinaryArithICUpdate, js::jit::IonBinaryArithIC::update) \
_(IonBindNameICUpdate, js::jit::IonBindNameIC::update) \
_(IonCheckPrivateFieldICUpdate, js::jit::IonCheckPrivateFieldIC::update) \
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
index ed3f63c88c..3ec85a72c2 100644
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -545,6 +545,39 @@ bool InvokeFunction(JSContext* cx, HandleObject obj, bool constructing,
return Call(cx, fval, thisv, args, rval);
}
+bool InvokeNativeFunction(JSContext* cx, bool constructing,
+ bool ignoresReturnValue, uint32_t argc, Value* argv,
+ MutableHandleValue rval) {
+ // Ensure argv array is rooted - we may GC in here.
+ size_t numValues = argc + 2 + constructing;
+ RootedExternalValueArray argvRoot(cx, numValues, argv);
+
+ // Data in the argument vector is arranged for a JIT -> C++ call.
+ CallArgs callArgs = CallArgsFromSp(argc + constructing, argv + numValues,
+ constructing, ignoresReturnValue);
+
+ // This function is only called when the callee is a native function.
+ MOZ_ASSERT(callArgs.callee().as<JSFunction>().isNativeWithoutJitEntry());
+
+ if (constructing) {
+ MOZ_ASSERT(callArgs.thisv().isMagic(JS_IS_CONSTRUCTING));
+
+ if (!ConstructFromStack(cx, callArgs)) {
+ return false;
+ }
+
+ MOZ_ASSERT(callArgs.rval().isObject(),
+ "native constructors don't return primitives");
+ } else {
+ if (!CallFromStack(cx, callArgs)) {
+ return false;
+ }
+ }
+
+ rval.set(callArgs.rval());
+ return true;
+}
+
void* GetContextSensitiveInterpreterStub() {
return TlsContext.get()->runtime()->jitRuntime()->interpreterStub().value;
}
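
The argument-vector layout InvokeNativeFunction assumes can be read off the numValues computation and the CallArgsFromSp call above; spelled out (illustrative):

// Illustrative layout of |argv| as handed over by JIT code:
//
//   argv[0]            callee function Value
//   argv[1]            |this|
//   argv[2..argc+1]    actual arguments
//   argv[argc+2]       newTarget (present only when constructing)
//
// CallArgsFromSp() receives argv + numValues, a pointer one past the last
// Value, and walks back argc + constructing slots to locate the arguments.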
@@ -1111,7 +1144,7 @@ bool NormalSuspend(JSContext* cx, HandleObject obj, BaselineFrame* frame,
bool FinalSuspend(JSContext* cx, HandleObject obj, const jsbytecode* pc) {
MOZ_ASSERT(JSOp(*pc) == JSOp::FinalYieldRval);
- AbstractGeneratorObject::finalSuspend(obj);
+ AbstractGeneratorObject::finalSuspend(cx, obj);
return true;
}
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
index a68dd8279f..b5ac5d700b 100644
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -354,6 +354,10 @@ struct LastArg<HeadType, TailTypes...> {
uint32_t argc, Value* argv,
MutableHandleValue rval);
+[[nodiscard]] bool InvokeNativeFunction(JSContext* cx, bool constructing,
+ bool ignoresReturnValue, uint32_t argc,
+ Value* argv, MutableHandleValue rval);
+
bool InvokeFromInterpreterStub(JSContext* cx,
InterpreterStubExitFrameLayout* frame);
void* GetContextSensitiveInterpreterStub();
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
index 9a99e0f5c3..fdaafd00b3 100644
--- a/js/src/jit/WarpCacheIRTranspiler.cpp
+++ b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -977,7 +977,7 @@ bool WarpCacheIRTranspiler::emitGuardDynamicSlotValue(ObjOperandId objId,
return true;
}
-bool WarpCacheIRTranspiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
+bool WarpCacheIRTranspiler::emitLoadScriptedProxyHandler(ObjOperandId resultId,
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -5216,10 +5216,14 @@ bool WarpCacheIRTranspiler::emitLoadOperandResult(ValOperandId inputId) {
}
bool WarpCacheIRTranspiler::emitLoadWrapperTarget(ObjOperandId objId,
- ObjOperandId resultId) {
+ ObjOperandId resultId,
+ bool fallible) {
MDefinition* obj = getOperand(objId);
- auto* ins = MLoadWrapperTarget::New(alloc(), obj);
+ auto* ins = MLoadWrapperTarget::New(alloc(), obj, fallible);
+ if (fallible) {
+ ins->setGuard();
+ }
add(ins);
return defineOperand(resultId, ins);
diff --git a/js/src/jit/arm/Architecture-arm.h b/js/src/jit/arm/Architecture-arm.h
index fa2ae8e0ed..00edac33da 100644
--- a/js/src/jit/arm/Architecture-arm.h
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -32,7 +32,7 @@ namespace jit {
static const int32_t NUNBOX32_TYPE_OFFSET = 4;
static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
// How far forward/back can a jump go? Provide a generous buffer for thunks.
static const uint32_t JumpImmediateRange = 20 * 1024 * 1024;
diff --git a/js/src/jit/arm64/Architecture-arm64.h b/js/src/jit/arm64/Architecture-arm64.h
index 96bbc63848..7101709f18 100644
--- a/js/src/jit/arm64/Architecture-arm64.h
+++ b/js/src/jit/arm64/Architecture-arm64.h
@@ -551,7 +551,7 @@ static const uint32_t SpillSlotSize =
std::max(sizeof(Registers::RegisterContent),
sizeof(FloatRegisters::RegisterContent));
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
// When our only strategy for far jumps is to encode the offset directly, and
// not insert any jump islands during assembly for even further jumps, then the
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
index 12244e73e4..b425b286ee 100644
--- a/js/src/jit/arm64/vixl/Cpu-vixl.cpp
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
@@ -214,7 +214,7 @@ CPUFeatures CPU::InferCPUFeaturesFromOS(
for (size_t i = 0; i < kFeatureBitCount; i++) {
if (auxv & (1UL << i)) features.Combine(kFeatureBits[i]);
}
-#elif defined(XP_MACOSX)
+#elif defined(XP_DARWIN)
// Apple processors have kJSCVT, kDotProduct, and kAtomics features.
features.Combine(CPUFeatures::kJSCVT, CPUFeatures::kDotProduct,
CPUFeatures::kAtomics);
diff --git a/js/src/jit/loong64/Architecture-loong64.h b/js/src/jit/loong64/Architecture-loong64.h
index 48745ee37a..29da43272f 100644
--- a/js/src/jit/loong64/Architecture-loong64.h
+++ b/js/src/jit/loong64/Architecture-loong64.h
@@ -335,7 +335,7 @@ static const uint32_t SpillSlotSize =
std::max(sizeof(Registers::RegisterContent),
sizeof(FloatRegisters::RegisterContent));
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t SizeOfReturnAddressAfterCall = 0;
// When our only strategy for far jumps is to encode the offset directly, and
diff --git a/js/src/jit/mips32/Architecture-mips32.h b/js/src/jit/mips32/Architecture-mips32.h
index 8e186d2c9c..4ce68032b2 100644
--- a/js/src/jit/mips32/Architecture-mips32.h
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -20,7 +20,7 @@
namespace js {
namespace jit {
-static const uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
+static constexpr uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
// These offsets are specific to nunboxing, and capture offsets into the
// components of a js::Value.
diff --git a/js/src/jit/mips64/Architecture-mips64.h b/js/src/jit/mips64/Architecture-mips64.h
index d3db37ea2c..7bf6054a72 100644
--- a/js/src/jit/mips64/Architecture-mips64.h
+++ b/js/src/jit/mips64/Architecture-mips64.h
@@ -20,7 +20,7 @@ namespace js {
namespace jit {
// Shadow stack space is not required on MIPS64.
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
// MIPS64 have 64 bit floating-point coprocessor. There are 32 double
// precision register which can also be used as single precision registers.
diff --git a/js/src/jit/moz.build b/js/src/jit/moz.build
index c0d2d5f2df..c49b4fcd9f 100644
--- a/js/src/jit/moz.build
+++ b/js/src/jit/moz.build
@@ -87,6 +87,7 @@ UNIFIED_SOURCES += [
"Sink.cpp",
"Snapshots.cpp",
"Trampoline.cpp",
+ "TrampolineNatives.cpp",
"TrialInlining.cpp",
"TypePolicy.cpp",
"ValueNumbering.cpp",
diff --git a/js/src/jit/none/Architecture-none.h b/js/src/jit/none/Architecture-none.h
index 2433234fbf..9218404992 100644
--- a/js/src/jit/none/Architecture-none.h
+++ b/js/src/jit/none/Architecture-none.h
@@ -157,7 +157,7 @@ struct FloatRegister {
inline bool hasUnaliasedDouble() { MOZ_CRASH(); }
inline bool hasMultiAlias() { MOZ_CRASH(); }
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t JumpImmediateRange = INT32_MAX;
#ifdef JS_NUNBOX32
diff --git a/js/src/jit/riscv64/Architecture-riscv64.h b/js/src/jit/riscv64/Architecture-riscv64.h
index c75bd05ff1..8d02e6e806 100644
--- a/js/src/jit/riscv64/Architecture-riscv64.h
+++ b/js/src/jit/riscv64/Architecture-riscv64.h
@@ -494,7 +494,7 @@ FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
inline bool hasUnaliasedDouble() { return false; }
inline bool hasMultiAlias() { return false; }
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t JumpImmediateRange = INT32_MAX;
#ifdef JS_NUNBOX32
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index d8b5693d85..74c11bd91b 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -649,14 +649,14 @@ class LApplyArgsGeneric
LIR_HEADER(ApplyArgsGeneric)
LApplyArgsGeneric(const LAllocation& func, const LAllocation& argc,
- const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
- const LDefinition& tmpcopy)
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, argc);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
- setTemp(1, tmpcopy);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
}
MApplyArgs* mir() const { return mir_->toApplyArgs(); }
@@ -712,14 +712,14 @@ class LApplyArrayGeneric
LIR_HEADER(ApplyArrayGeneric)
LApplyArrayGeneric(const LAllocation& func, const LAllocation& elements,
- const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
- const LDefinition& tmpcopy)
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, elements);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
- setTemp(1, tmpcopy);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
}
MApplyArray* mir() const { return mir_->toApplyArray(); }
@@ -746,13 +746,13 @@ class LConstructArgsGeneric
LConstructArgsGeneric(const LAllocation& func, const LAllocation& argc,
const LAllocation& newTarget,
const LBoxAllocation& thisv,
- const LDefinition& tmpobjreg)
+ const LDefinition& tmpObjReg)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, argc);
setOperand(2, newTarget);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
+ setTemp(0, tmpObjReg);
}
MConstructArgs* mir() const { return mir_->toConstructArgs(); }
@@ -784,13 +784,13 @@ class LConstructArrayGeneric
LConstructArrayGeneric(const LAllocation& func, const LAllocation& elements,
const LAllocation& newTarget,
const LBoxAllocation& thisv,
- const LDefinition& tmpobjreg)
+ const LDefinition& tmpObjReg)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, elements);
setOperand(2, newTarget);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
+ setTemp(0, tmpObjReg);
}
MConstructArray* mir() const { return mir_->toConstructArray(); }
@@ -816,6 +816,164 @@ class LConstructArrayGeneric
const LAllocation* getTempForArgCopy() { return getOperand(2); }
};
+class LApplyArgsNative
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArgsNative)
+
+ LApplyArgsNative(const LAllocation& func, const LAllocation& argc,
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return false; }
+
+ MApplyArgs* mir() const { return mir_->toApplyArgs(); }
+
+ uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgc() { return getOperand(1); }
+
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+};
+
+class LApplyArgsObjNative
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArgsObjNative)
+
+ LApplyArgsObjNative(const LAllocation& func, const LAllocation& argsObj,
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argsObj);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return false; }
+
+ MApplyArgsObj* mir() const { return mir_->toApplyArgsObj(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgsObj() { return getOperand(1); }
+
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+
+ // argc is mapped to the same register as argsObj: argc becomes live as
+ // argsObj is dying; all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+};
+
+class LApplyArrayNative
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArrayNative)
+
+ LApplyArrayNative(const LAllocation& func, const LAllocation& elements,
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return false; }
+
+ MApplyArray* mir() const { return mir_->toApplyArray(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+
+ // argc is mapped to the same register as elements: argc becomes live as
+ // elements is dying; all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+};
+
+class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
+ public:
+ LIR_HEADER(ConstructArgsNative)
+
+ LConstructArgsNative(const LAllocation& func, const LAllocation& argc,
+ const LAllocation& newTarget,
+ const LDefinition& tmpObjReg, const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setOperand(2, newTarget);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return true; }
+
+ MConstructArgs* mir() const { return mir_->toConstructArgs(); }
+
+ uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgc() { return getOperand(1); }
+ const LAllocation* getNewTarget() { return getOperand(2); }
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+};
+
+class LConstructArrayNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
+ public:
+ LIR_HEADER(ConstructArrayNative)
+
+ LConstructArrayNative(const LAllocation& func, const LAllocation& elements,
+ const LAllocation& newTarget,
+ const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setOperand(2, newTarget);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return true; }
+
+ MConstructArray* mir() const { return mir_->toConstructArray(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+ const LAllocation* getNewTarget() { return getOperand(2); }
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+
+ // argc is mapped to the same register as elements: argc becomes live as
+ // elements is dying; all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+};
+
// Takes in either an integer or boolean input and tests it for truthiness.
class LTestIAndBranch : public LControlInstructionHelper<2, 1, 0> {
public:
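
A note on reading the new LIR classes' template arguments, using the helper's LCallInstructionHelper<Defs, Operands, Temps> convention (worked example):

// Worked example: LApplyArgsNative
//   : LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2>
//     Defs     = BOX_PIECES:     the boxed return Value
//     Operands = BOX_PIECES + 2: the boxed |this| plus func and argc
//     Temps    = 2:              tmpObjReg and tmpCopy
//
// The constructing variants use <BOX_PIECES, 3, 2> (func, argc/elements,
// newTarget) and carry no boxed |this| operand: for native constructor calls
// |this| is the JS_IS_CONSTRUCTING magic Value, as asserted in
// InvokeNativeFunction above.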
diff --git a/js/src/jit/wasm32/Architecture-wasm32.h b/js/src/jit/wasm32/Architecture-wasm32.h
index d7726eaa5f..2419591664 100644
--- a/js/src/jit/wasm32/Architecture-wasm32.h
+++ b/js/src/jit/wasm32/Architecture-wasm32.h
@@ -161,7 +161,7 @@ struct FloatRegister {
inline bool hasUnaliasedDouble() { MOZ_CRASH(); }
inline bool hasMultiAlias() { MOZ_CRASH(); }
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t JumpImmediateRange = INT32_MAX;
#ifdef JS_NUNBOX32
diff --git a/js/src/jit/x86-shared/Architecture-x86-shared.h b/js/src/jit/x86-shared/Architecture-x86-shared.h
index b4701af284..72055efb7d 100644
--- a/js/src/jit/x86-shared/Architecture-x86-shared.h
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.h
@@ -31,9 +31,9 @@ static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
#endif
#if defined(JS_CODEGEN_X64) && defined(_WIN64)
-static const uint32_t ShadowStackSpace = 32;
+static constexpr uint32_t ShadowStackSpace = 32;
#else
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
#endif
static const uint32_t JumpImmediateRange = INT32_MAX;